[2/2] hbase git commit: HBASE-20308 Dockerfile for test must include git.
HBASE-20308 Dockerfile for test must include git.

Signed-off-by: Mike Drob

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/369877d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/369877d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/369877d0

Branch: refs/heads/branch-2.0
Commit: 369877d07a3b76e1e3bba95f2502ca77d57d412c
Parents: 2e61252
Author: Sean Busbey
Authored: Wed Mar 28 17:51:39 2018 -0500
Committer: Sean Busbey
Committed: Wed Mar 28 18:39:28 2018 -0500

 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)

http://git-wip-us.apache.org/repos/asf/hbase/blob/369877d0/dev-support/docker/Dockerfile

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 49ad14d..0299fd3 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -20,6 +20,7 @@
 FROM maven:3.5-jdk-8

 RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+  git \
   bats \
   findbugs \
   libperl-critic-perl \
[1/2] hbase git commit: HBASE-20308 Dockerfile for test must include git.
Repository: hbase
Updated Branches:
  refs/heads/branch-2   b72e19e3b -> dba480856
  refs/heads/branch-2.0 2e612527a -> 369877d07

HBASE-20308 Dockerfile for test must include git.

Signed-off-by: Mike Drob

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dba48085
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dba48085
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dba48085

Branch: refs/heads/branch-2
Commit: dba480856b91edec5d8e69236897d14bfeec8411
Parents: b72e19e
Author: Sean Busbey
Authored: Wed Mar 28 17:51:39 2018 -0500
Committer: Sean Busbey
Committed: Wed Mar 28 18:39:06 2018 -0500

 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)

http://git-wip-us.apache.org/repos/asf/hbase/blob/dba48085/dev-support/docker/Dockerfile
(same one-line patch as the branch-2.0 message above: adds git to the
apt-get install list in dev-support/docker/Dockerfile, index 49ad14d..0299fd3)
hbase git commit: HBASE-20308 Dockerfile for test must include git.
Repository: hbase
Updated Branches:
  refs/heads/master 061a31fad -> d8b550fab

HBASE-20308 Dockerfile for test must include git.

Signed-off-by: Mike Drob

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d8b550fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d8b550fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d8b550fa

Branch: refs/heads/master
Commit: d8b550fabcacf3050583b514c4081adf5d087d65
Parents: 061a31f
Author: Sean Busbey
Authored: Wed Mar 28 17:51:39 2018 -0500
Committer: Sean Busbey
Committed: Wed Mar 28 18:07:36 2018 -0500

 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8b550fa/dev-support/docker/Dockerfile
(same one-line patch as the branch-2.0 message above: adds git to the
apt-get install list in dev-support/docker/Dockerfile, index 49ad14d..0299fd3)
hbase git commit: HBASE-20159 Support using separate ZK quorums for client
Repository: hbase
Updated Branches:
  refs/heads/branch-2 78452113c -> b72e19e3b

HBASE-20159 Support using separate ZK quorums for client

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b72e19e3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b72e19e3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b72e19e3

Branch: refs/heads/branch-2
Commit: b72e19e3b989fa46c72d356703f5c0334b83fe76
Parents: 7845211
Author: Yu Li
Authored: Thu Mar 29 02:37:26 2018 +0800
Committer: Yu Li
Committed: Thu Mar 29 03:20:52 2018 +0800

 .../hbase/zookeeper/ReadOnlyZKClient.java       |   8 +-
 .../org/apache/hadoop/hbase/HConstants.java     |  17 +-
 .../apache/hadoop/hbase/zookeeper/ZKConfig.java |  24 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  47 +++-
 .../hbase/master/zksyncer/ClientZKSyncer.java   | 241 +
 .../master/zksyncer/MasterAddressSyncer.java    |  52
 .../master/zksyncer/MetaLocationSyncer.java     |  46
 .../hbase/regionserver/HRegionServer.java       |  14 +
 .../regionserver/ReplicationSink.java           |   4 +
 .../client/TestSeparateClientZKCluster.java     | 268 +++
 .../hbase/master/TestMasterNoCluster.java       |  37 +++
 .../hbase/zookeeper/MiniZooKeeperCluster.java   |   2 +-
 .../hadoop/hbase/zookeeper/ZKServerTool.java    |   2 +-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |   2 +-
 .../hadoop/hbase/zookeeper/ZKWatcher.java       |  37 ++-
 15 files changed, 781 insertions(+), 20 deletions(-)

http://git-wip-us.apache.org/repos/asf/hbase/blob/b72e19e3/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
index d2f4763..fc2d5f0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
@@ -124,7 +124,13 @@ public final class ReadOnlyZKClient implements Closeable {
   }

   public ReadOnlyZKClient(Configuration conf) {
-    this.connectString = ZKConfig.getZKQuorumServersString(conf);
+    // We might use a different ZK for client access
+    String clientZkQuorumServers = ZKConfig.getClientZKQuorumServersString(conf);
+    if (clientZkQuorumServers != null) {
+      this.connectString = clientZkQuorumServers;
+    } else {
+      this.connectString = ZKConfig.getZKQuorumServersString(conf);
+    }
     this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT);
     this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY);
     this.retryIntervalMs =

http://git-wip-us.apache.org/repos/asf/hbase/blob/b72e19e3/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 372d9b1..7ee31a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -187,6 +187,19 @@ public final class HConstants {
   /** Name of ZooKeeper quorum configuration parameter. */
   public static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum";

+  /** Name of ZooKeeper quorum configuration parameter for client to locate meta. */
+  public static final String CLIENT_ZOOKEEPER_QUORUM = "hbase.client.zookeeper.quorum";
+
+  /** Client port of ZooKeeper for client to locate meta */
+  public static final String CLIENT_ZOOKEEPER_CLIENT_PORT =
+      "hbase.client.zookeeper.property.clientPort";
+
+  /** Indicate whether the client ZK are observer nodes of the server ZK */
+  public static final String CLIENT_ZOOKEEPER_OBSERVER_MODE =
+      "hbase.client.zookeeper.observer.mode";
+  /** Assuming client zk not in observer mode and master need to synchronize information */
+  public static final boolean DEFAULT_CLIENT_ZOOKEEPER_OBSERVER_MODE = false;
+
   /** Common prefix of ZooKeeper configuration properties */
   public static final String ZK_CFG_PROPERTY_PREFIX =
       "hbase.zookeeper.property.";
@@ -205,7 +218,7 @@ public final class HConstants {
       ZK_CFG_PROPERTY_PREFIX + CLIENT_PORT_STR;

   /** Default client port that the zookeeper listens on */
-  public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181;
+  public static final int DEFAULT_ZOOKEEPER_CLIENT_PORT = 2181;

   /**
    * Parameter name for the wait time for the recoverable zookeeper
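
[Editor's illustration] For orientation, a minimal client-side sketch of the
feature: the property names come straight from the HConstants hunk above, while
the quorum host names and the surrounding setup (HBaseConfiguration,
ConnectionFactory) are standard client API plus invented values, not part of
the commit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientZkQuorumExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Server-side quorum (host names are placeholders).
        conf.set("hbase.zookeeper.quorum", "zk-server1,zk-server2,zk-server3");
        // Separate quorum the client uses to locate meta (HBASE-20159).
        // When unset, ReadOnlyZKClient falls back to the server quorum.
        conf.set("hbase.client.zookeeper.quorum", "zk-client1,zk-client2");
        conf.setInt("hbase.client.zookeeper.property.clientPort", 2181);
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          System.out.println("Connected: " + !conn.isClosed());
        }
      }
    }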
hbase git commit: HBASE-20159 Support using separate ZK quorums for client
Repository: hbase
Updated Branches:
  refs/heads/master 3b6199a27 -> 061a31fad

HBASE-20159 Support using separate ZK quorums for client

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/061a31fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/061a31fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/061a31fa

Branch: refs/heads/master
Commit: 061a31fad1654d9ded96d118e04c14860413fa25
Parents: 3b6199a
Author: Yu Li
Authored: Thu Mar 29 02:37:26 2018 +0800
Committer: Yu Li
Committed: Thu Mar 29 02:37:26 2018 +0800

 15 files changed, 781 insertions(+), 20 deletions(-)

http://git-wip-us.apache.org/repos/asf/hbase/blob/061a31fa/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
http://git-wip-us.apache.org/repos/asf/hbase/blob/061a31fa/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java

(file list and patch body are the same as the branch-2 message above; on master
the HConstants hunks land at @@ -183,6 +183,19 @@ and @@ -201,7 +214,7 @@ with
index 9a43e7c..9241682, and the archived message truncates after the line
"/** Parameter name for the root dir in ZK for this cluster */ public stati")
[25/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/apache_hbase_reference_guide.pdf

diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 2c0540e..ec799a6 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
-/ModDate (D:20180327144547+00'00')
-/CreationDate (D:20180327144547+00'00')
+/ModDate (D:20180328144700+00'00')
+/CreationDate (D:20180328144700+00'00')

(regenerated 705-page reference guide PDF; the remainder of the hunk is raw
PDF object data: /Outlines and /PageLabels renumbering, 4515 -> 4514 and
4738 -> 4737, and the /Kids page object-reference arrays)
[05/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestRpcAccessChecks.html

Regenerated test API docs for the rename of TestAdminOnlyOperations to
TestRpcAccessChecks ("This class tests operations in MasterRpcServices which
require ADMIN access."):

 new file:  testdevapidocs/org/apache/hadoop/hbase/security/access/class-use/TestRpcAccessChecks.html
            ("No usage of org.apache.hadoop.hbase.security.access.TestRpcAccessChecks")
 modified:  testdevapidocs/org/apache/hadoop/hbase/security/access/package-frame.html
            (TestAdminOnlyOperations, TestAdminOnlyOperations.Action and
            TestAdminOnlyOperations.DummyCpService replaced by the
            TestRpcAccessChecks equivalents)
 modified:  testdevapidocs/org/apache/hadoop/hbase/security/access/package-summary.html
 modified:  testdevapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
[21/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
index 33418d0..3647d12 100644

Re-rendered source page for FileIOEngine, the IO engine that stores BucketCache
data in files on the local file system. The hunk is the full class body shifted
by three lines; the visible rendering shows:

  public FileIOEngine(long capacity, boolean maintainPersistence,
      String... filePaths) throws IOException
    - splits capacity evenly across the given file paths, deletes stale cache
      files when persistence is off ("File <path> already exists. Deleting!!"),
      allocates each file with RandomAccessFile.setLength() and keeps one
      FileChannel per file, and calls shutdown() if allocation fails
  public boolean isPersistent()
    - always returns true for the file engine
[12/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html

diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
index eccc4a3..ebbde54 100644

Re-rendered source for RSRpcServices. The substantive change is in
getRegionInfo(): the forced-split path now consults the region split policy
before computing a best split row. Old code (lines 1739-1751):

      if (request.hasBestSplitRow() && request.getBestSplitRow()) {
        HRegion r = region;
        region.startRegionOperation(Operation.SPLIT_REGION);
        r.forceSplit(null);
        bestSplitRow = r.checkSplit();
        // when all table data are in memstore, bestSplitRow = null
        // try to flush region first
        if (bestSplitRow == null) {
          r.flush(true);
          bestSplitRow = r.checkSplit();
        }
        r.clearSplit();
      }

New code, as far as the archived message carries it:

      boolean shouldSplit = true;
      if (request.hasBestSplitRow() && request.getBestSplitRow()) {
        HRegion r = region;
        region.startRegionOperation(Operation.SPLIT_REGION);
        r.forceSplit(null);
        // Even after setting force split if split policy says no to split
        // then we should not split.
        shouldSplit = region.getSplitPolicy().shouldSplit() && !info.isMetaRegion();

The remainder of the hunk re-renders the GetRegionInfoResponse builder,
getRegionLoad() and clearCompactionQueues() at shifted line numbers; the
archived message truncates mid-hunk.
[17/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html

(same regenerated RSRpcServices source diff as in part [12/26] above, rendered
for the RSRpcServices.LogDelegate.html page)
[16/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html

(same regenerated RSRpcServices source diff as in part [12/26] above, rendered
for the RSRpcServices.RegionScannerCloseCallBack.html page)
[23/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/checkstyle-aggregate.html

Regenerated Checkstyle report. Notable deltas in the rendering:

 - total errors: 15918 -> 15919 (3595 files checked, 0 infos, 0 warnings)
 - org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java: 7 -> 8 errors
 - rule totals: JavadocTagContinuationIndentation (offset "2") 784 -> 798,
   NonEmptyAtclauseDescription 3849 -> 3835, LineLength (max 100) 1619 -> 1620
 - several "Javadoc comment ... has parse error" rows reclassified from
   NonEmptyAtclauseDescription to JavadocTagContinuationIndentation
 - the site menu entry "HBase on Windows" was removed
[14/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html

(same regenerated RSRpcServices source diff as in part [12/26] above, rendered
for the RSRpcServices.RegionScannerShippedCallBack.html page)
[09/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/index-all.html

Regenerated master index of the test API docs: every index entry that pointed
at TestAdminOnlyOperations (CLASS_RULE, conf, DummyCpService(),
enableSecurity(Configuration), getServices(), GROUP_ADMIN, run(Admin) on the
Action interface, and setup()) now points at TestRpcAccessChecks instead.
[08/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html

Regenerated javadoc for TestAdmin1 ("Class to test HBaseAdmin. Spins up the
minicluster once at test start and then takes it down afterward."). The method
table grows from 34 to 35 entries with the new instance method

  void testSplitShouldNotHappenIfSplitIsDisabledForTable()

inserted before testTableAvailableWithRandomSplitKeys(); the remaining changes
are line-number shifts in the source links for the existing members
(CLASS_RULE, LOG, TEST_UTIL, admin, name, setUpBeforeClass(),
tearDownAfterClass(), setUp(), tearDown(), ...).
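
[Editor's illustration] For readers wondering what "split disabled for a table"
means in practice, a hedged sketch of one way to get there with the public
descriptor API. This is not taken from the test itself; the table name, column
family, and the choice of DisabledRegionSplitPolicy are assumptions for
illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DisableSplitSketch {
      static void createUnsplittableTable(Admin admin) throws java.io.IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("no_split_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            // DisabledRegionSplitPolicy is the stock policy that refuses all splits.
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy")
            .build();
        admin.createTable(desc);
      }
    }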
[24/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/book.html -- diff --git a/book.html b/book.html index 2770027..04d2890 100644 --- a/book.html +++ b/book.html @@ -890,8 +890,8 @@ This step is offered for testing and learning purposes only. You can start up to 9 backup HMaster servers, which makes 10 total HMasters, counting the primary. To start a backup HMaster, use the local-master-backup.sh. For each backup master you want to start, add a parameter representing the port offset for that master. -Each HMaster uses three ports (16010, 16020, and 16030 by default). The port offset is added to these ports, so using an offset of 2, the backup HMaster would use ports 16012, 16022, and 16032. -The following command starts 3 backup servers using ports 16012/16022/16032, 16013/16023/16033, and 16015/16025/16035. +Each HMaster uses two ports (16000 and 16010 by default). The port offset is added to these ports, so using an offset of 2, the backup HMaster would use ports 16002 and 16012. +The following command starts 3 backup servers using ports 16002/16012, 16003/16013, and 16005/16015. @@ -920,7 +920,7 @@ The local-regionservers.sh command allows you to run multiple Regio It works in a similar way to the local-master-backup.sh command, in that each parameter you provide represents the port offset for an instance. Each RegionServer requires two ports, and the default ports are 16020 and 16030. Since HBase version 1.1.0, HMaster doesn’t use region server ports, this leaves 10 ports (16020 to 16029 and 16030 to 16039) to be used for RegionServers. -For supporting additional RegionServers, base ports can be changed in script 'local-regionservers.sh' to appropriate value. +For supporting additional RegionServers, set environment variables HBASE_RS_BASE_PORT and HBASE_RS_INFO_BASE_PORT to appropriate values before running script local-regionservers.sh. e.g. With values 16200 and 16300 for base ports, 99 additional RegionServers can be supported, on a server. The following command starts four additional RegionServers, running on sequential ports starting at 16022/16032 (base ports 16020/16030 plus 2). @@ -6881,12 +6881,6 @@ Quitting... Please consult the documentation published specifically for the version of HBase that you are upgrading to for details on the upgrade process. - -13.4. Upgrading to 2.x - -Coming soon… - - The Apache HBase Shell @@ -15943,9 +15937,21 @@ These parameters will be explained in context, and then will be given in a table Being Stuck When the MemStore gets too large, it needs to flush its contents to a StoreFile. -However, a Store can only have hbase.hstore.blockingStoreFiles files, so the MemStore needs to wait for the number of StoreFiles to be reduced by one or more compactions. -However, if the MemStore grows larger than hbase.hregion.memstore.flush.size, it is not able to flush its contents to a StoreFile. -If the MemStore is too large and the number of StoreFiles is also too high, the algorithm is said to be "stuck". The compaction algorithm checks for this "stuck" situation and provides mechanisms to alleviate it. +However, Stores are configured with a bound on the number StoreFiles, +hbase.hstore.blockingStoreFiles, and if in excess, the MemStore flush must wait +until the StoreFile count is reduced by one or more compactions. If the MemStore +is too large and the number of StoreFiles is also too high, the algorithm is said +to be "stuck". By default we’ll wait on compactions up to +hbase.hstore.blockingWaitTime milliseconds. 
If this period expires, we’ll flush +anyway even though we are in excess of the +hbase.hstore.blockingStoreFiles count. + + +Upping the hbase.hstore.blockingStoreFiles count will allow flushes to happen, +but a Store with many StoreFiles will likely have higher read latencies. Try to +figure out why compactions are not keeping up. Is it a write spurt that is bringing +about this situation, or is it a regular occurrence and the cluster is under-provisioned +for the volume of writes? @@ -32662,9 +32668,6 @@ Starting the mini-cluster takes about 20-30 seconds, but that should be appropri -To use an HBase mini-cluster on Microsoft Windows, you need to use a Cygwin environment. - - See the paper at http://blog.sematext.com/2010/08/30/hbase-case-study-using-hbasetestingutility-for-local-testing-development/";>HBase Case-Study: Using HBaseTestingUtility for Local Testing and Development (2010) for more information about HBaseTestingUtility. @@ -37001,7 +37004,7 @@ The server will return cellblocks compressed using this same compressor as long Version 3.0.0-SNAPSHOT -Last updated 2018-03-27 14:29:58 UTC +Last updated 2018-03-28 14:29:55 UTC http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/bulk-loads.html -
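To make the two knobs in the "stuck" discussion above concrete, here is a minimal Java sketch using the stock Hadoop/HBase Configuration API; the values shown match the usual defaults but are illustrative, not tuning advice:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StuckStoreTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Bound on StoreFiles per Store; past this, MemStore flushes must wait
    // for compactions to bring the count back down.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // How long (ms) a blocked flush waits on compactions before flushing
    // anyway, in excess of the bound above.
    conf.setLong("hbase.hstore.blockingWaitTime", 90000L);
    System.out.printf("blockingStoreFiles=%s, blockingWaitTime=%sms%n",
        conf.get("hbase.hstore.blockingStoreFiles"),
        conf.get("hbase.hstore.blockingWaitTime"));
  }
}

Raising hbase.hstore.blockingStoreFiles lets flushes proceed at the cost of higher read latencies against the larger StoreFile count, which is exactly the trade-off the paragraph above warns about.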
[02/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.Action.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.Action.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.Action.html new file mode 100644 index 000..127be96 --- /dev/null +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.Action.html @@ -0,0 +1,434 @@ +http://www.w3.org/TR/html4/loose.dtd";> + + +Source code + + + + +001 +002/** +003 * Licensed to the Apache Software Foundation (ASF) under one +004 * or more contributor license agreements. See the NOTICE file +005 * distributed with this work for additional information +006 * regarding copyright ownership. The ASF licenses this file +007 * to you under the Apache License, Version 2.0 (the +008 * "License"); you may not use this file except in compliance +009 * with the License. You may obtain a copy of the License at +010 * +011 * http://www.apache.org/licenses/LICENSE-2.0 +012 * +013 * Unless required by applicable law or agreed to in writing, software +014 * distributed under the License is distributed on an "AS IS" BASIS, +015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +016 * See the License for the specific language governing permissions and +017 * limitations under the License. +018 */ +019package org.apache.hadoop.hbase.security.access; +020 +021import static org.apache.hadoop.hbase.AuthUtil.toGroupEntry; +022import static org.junit.Assert.assertArrayEquals; +023import static org.junit.Assert.assertFalse; +024import static org.junit.Assert.assertTrue; +025import static org.junit.Assert.fail; +026import static org.mockito.Mockito.mock; +027 +028import com.google.protobuf.Service; +029import com.google.protobuf.ServiceException; +030import java.io.IOException; +031import java.security.PrivilegedExceptionAction; +032import java.util.Collections; +033import java.util.HashMap; +034import org.apache.hadoop.conf.Configuration; +035import org.apache.hadoop.hbase.Cell; +036import org.apache.hadoop.hbase.CellUtil; +037import org.apache.hadoop.hbase.HBaseClassTestRule; +038import org.apache.hadoop.hbase.HBaseTestingUtility; +039import org.apache.hadoop.hbase.ServerName; +040import org.apache.hadoop.hbase.TableName; +041import org.apache.hadoop.hbase.client.Admin; +042import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +043import org.apache.hadoop.hbase.client.Connection; +044import org.apache.hadoop.hbase.client.ConnectionFactory; +045import org.apache.hadoop.hbase.client.Get; +046import org.apache.hadoop.hbase.client.Put; +047import org.apache.hadoop.hbase.client.Result; +048import org.apache.hadoop.hbase.client.Table; +049import org.apache.hadoop.hbase.client.TableDescriptor; +050import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +051import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +052import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; +053import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; +054import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos; +055import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; +056import org.apache.hadoop.hbase.security.AccessDeniedException; +057import org.apache.hadoop.hbase.security.User; +058import org.apache.hadoop.hbase.testclassification.MediumTests; +059import 
org.apache.hadoop.hbase.testclassification.SecurityTests; +060import org.apache.hadoop.hbase.util.Bytes; +061import org.junit.BeforeClass; +062import org.junit.ClassRule; +063import org.junit.Rule; +064import org.junit.Test; +065import org.junit.experimental.categories.Category; +066import org.junit.rules.TestName; +067 +068/** +069 * This class tests operations in MasterRpcServices which require ADMIN access. +070 * It doesn't test all operations which require ADMIN access, only those which get vetted within +071 * MasterRpcServices at the point of entry itself (unlike old approach of using +072 * hooks in AccessController). +073 * +074 * Sidenote: +075 * There is one big difference between how security tests for AccessController hooks work, and how +076 * the tests in this class for security in MasterRpcServices work. +077 * The difference arises because of the way AC & MasterRpcServices get the user. +078 * +079 * In AccessController, it first checks if there is an active rpc user in ObserverContext. If not, +080 * it uses UserProvider for current user. This *might* make sense in the context of coprocessors, +081 * because they can be called outside the context of RPCs. +082 * But in the context of MasterRpcServices, only one way makes sense - RPCServer.getRequestUser(). +083 * +084 * In AC tests, when we do FooU
[06/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html new file mode 100644 index 000..80bb659 --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html @@ -0,0 +1,763 @@ +http://www.w3.org/TR/html4/loose.dtd";> + + + + + +TestRpcAccessChecks (Apache HBase 3.0.0-SNAPSHOT Test API) + + + + + +var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10}; +var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var altColor = "altColor"; +var rowColor = "rowColor"; +var tableTab = "tableTab"; +var activeTableTab = "activeTableTab"; + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +Prev Class +Next Class + + +Frames +No Frames + + +All Classes + + + + + + + +Summary: +Nested | +Field | +Constr | +Method + + +Detail: +Field | +Constr | +Method + + + + + + + + +org.apache.hadoop.hbase.security.access +Class TestRpcAccessChecks + + + +https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true"; title="class or interface in java.lang">java.lang.Object + + +org.apache.hadoop.hbase.security.access.TestRpcAccessChecks + + + + + + + + +public class TestRpcAccessChecks +extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true"; title="class or interface in java.lang">Object +This class tests operations in MasterRpcServices which require ADMIN access. + It doesn't test all operations which require ADMIN access, only those which get vetted within + MasterRpcServices at the point of entry itself (unlike old approach of using + hooks in AccessController). + + Sidenote: + There is one big difference between how security tests for AccessController hooks work, and how + the tests in this class for security in MasterRpcServices work. + The difference arises because of the way AC & MasterRpcServices get the user. + + In AccessController, it first checks if there is an active rpc user in ObserverContext. If not, + it uses UserProvider for current user. This *might* make sense in the context of coprocessors, + because they can be called outside the context of RPCs. + But in the context of MasterRpcServices, only one way makes sense - RPCServer.getRequestUser(). + + In AC tests, when we do FooUser.runAs on AccessController instance directly, it bypasses + the rpc framework completely, but works because UserProvider provides the correct user, i.e. + FooUser in this case. + + But this doesn't work for the tests here, so we go around by doing complete RPCs. 
+ + + + + + + + + + + +Nested Class Summary + +Nested Classes + +Modifier and Type +Class and Description + + +(package private) static interface +TestRpcAccessChecks.Action + + +static class +TestRpcAccessChecks.DummyCpService + + + + + + + + + +Field Summary + +Fields + +Modifier and Type +Field and Description + + +static HBaseClassTestRule +CLASS_RULE + + +private static org.apache.hadoop.conf.Configuration +conf + + +private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String +GROUP_ADMIN + + +org.junit.rules.TestName +TEST_NAME + + +private static HBaseTestingUtility +TEST_UTIL + + +private static org.apache.hadoop.hbase.security.User +USER_ADMIN + + +private static org.apache.hadoop.hbase.security.User +USER_GROUP_ADMIN + + +private static org.apache.hadoop.hbase.security.User +USER_NON_ADMIN + + + + + + + + + +Constructor Summary + +Constructors + +Constructor and Description + + +TestRpcAccessChecks() + + + + + + + + + +Method Summary + +All Methods Static Methods Instance Methods Concrete Methods + +Modifier and Type +Method and Description + + +private static void +enableSecurity(org.apache.hadoop.conf.Configuration conf) + + +static void +setup() + + +void +testCleanerChoreRunning() + + +void +testEnableCatalogJanitor() + + +void +testExecProcedure() + + +void +testExecProcedureWithRet() + + +void +te
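The class-level javadoc above describes why these tests must issue complete RPCs rather than call the coprocessor directly. A minimal sketch of that pattern, assuming a running, security-enabled mini-cluster; the user name and the ADMIN-gated call are illustrative stand-ins for the test's USER_NON_ADMIN field and its per-RPC actions:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;

public class RpcAsUserSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical non-admin user, mirroring the test's USER_NON_ADMIN.
    User nonAdmin = User.createUserForTesting(conf, "non_admin", new String[0]);
    nonAdmin.runAs((PrivilegedExceptionAction<Void>) () -> {
      // Open a real client connection inside runAs, so the call travels the
      // RPC stack and RPCServer.getRequestUser() resolves to "non_admin".
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        admin.runCleanerChore(); // an ADMIN-gated MasterRpcServices entry point
      } catch (AccessDeniedException expected) {
        // Denied at the point of entry, not in an AccessController hook.
      }
      return null;
    });
  }
}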
[26/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48. Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/b2e10744 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/b2e10744 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/b2e10744 Branch: refs/heads/asf-site Commit: b2e1074488b71023f86790b5620d3003b625b785 Parents: 53df288 Author: jenkins Authored: Wed Mar 28 14:48:56 2018 + Committer: jenkins Committed: Wed Mar 28 14:48:56 2018 + -- acid-semantics.html | 7 +- apache_hbase_reference_guide.pdf| 29004 + book.html |35 +- bulk-loads.html | 7 +- checkstyle-aggregate.html | 18143 +-- checkstyle.rss |32 +- coc.html| 7 +- dependencies.html | 7 +- dependency-convergence.html | 7 +- dependency-info.html| 7 +- dependency-management.html | 7 +- devapidocs/constant-values.html | 6 +- .../hfile/bucket/FileIOEngine.FileAccessor.html | 4 +- .../bucket/FileIOEngine.FileReadAccessor.html | 6 +- .../bucket/FileIOEngine.FileWriteAccessor.html | 6 +- .../hbase/io/hfile/bucket/FileIOEngine.html |48 +- .../hbase/regionserver/RSRpcServices.html |76 +- .../org/apache/hadoop/hbase/Version.html| 6 +- .../hfile/bucket/FileIOEngine.FileAccessor.html | 577 +- .../bucket/FileIOEngine.FileReadAccessor.html | 577 +- .../bucket/FileIOEngine.FileWriteAccessor.html | 577 +- .../hbase/io/hfile/bucket/FileIOEngine.html | 577 +- ...tTableRegionProcedure.StoreFileSplitter.html | 2 +- .../assignment/SplitTableRegionProcedure.html | 2 +- .../regionserver/RSRpcServices.LogDelegate.html | 3691 +-- ...SRpcServices.RegionScannerCloseCallBack.html | 3691 +-- .../RSRpcServices.RegionScannerHolder.html | 3691 +-- ...pcServices.RegionScannerShippedCallBack.html | 3691 +-- ...RpcServices.RegionScannersCloseCallBack.html | 3691 +-- .../RSRpcServices.ScannerListener.html | 3691 +-- .../hbase/regionserver/RSRpcServices.html | 3691 +-- export_control.html | 7 +- index.html | 7 +- integration.html| 7 +- issue-tracking.html | 7 +- license.html| 7 +- mail-lists.html | 7 +- metrics.html| 7 +- old_news.html | 7 +- plugin-management.html | 7 +- plugins.html| 7 +- poweredbyhbase.html | 7 +- project-info.html | 7 +- project-reports.html| 7 +- project-summary.html| 7 +- pseudo-distributed.html | 7 +- replication.html| 7 +- resources.html | 7 +- source-repository.html | 7 +- sponsors.html | 7 +- supportingprojects.html | 7 +- team-list.html | 7 +- testdevapidocs/allclasses-frame.html| 6 +- testdevapidocs/allclasses-noframe.html | 6 +- testdevapidocs/constant-values.html |38 +- testdevapidocs/index-all.html |92 +- .../hbase/class-use/HBaseClassTestRule.html |10 +- .../hbase/class-use/HBaseTestingUtility.html| 8 +- .../apache/hadoop/hbase/client/TestAdmin1.html | 114 +- .../org/apache/hadoop/hbase/package-tree.html |10 +- .../hadoop/hbase/procedure/package-tree.html| 8 +- .../hadoop/hbase/procedure2/package-tree.html | 4 +- .../hadoop/hbase/regionserver/package-tree.html | 2 +- ...ccessController3.FaultyAccessController.html | 4 +- .../access/TestAdminOnlyOperations.Action.html | 231 - .../TestAdminOnlyOperations.DummyCpService.html | 336 - .../access/TestAdminOnlyOperations.html | 714 - .../hbase/security/access/TestCellACLs.html | 4 +- .../security/access/TestNamespaceCommands.html | 4 +- .../access/TestRpcAccessChecks.Action.html | 231 + .../TestRpcAccessChecks.DummyCpService.html | 336 + .../security/access/TestRpcAccessChecks.html| 763 + .../access/TestScanEarlyTe
[15/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html index eccc4a3..ebbde54 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html @@ -1744,1869 +1744,1872 @@ 1736 HRegion region = getRegion(request.getRegion()); 1737 RegionInfo info = region.getRegionInfo(); 1738 byte[] bestSplitRow = null; -1739 if (request.hasBestSplitRow() && request.getBestSplitRow()) { -1740HRegion r = region; -1741 region.startRegionOperation(Operation.SPLIT_REGION); -1742r.forceSplit(null); -1743bestSplitRow = r.checkSplit(); -1744// when all table data are in memstore, bestSplitRow = null -1745// try to flush region first -1746if(bestSplitRow == null) { -1747 r.flush(true); -1748 bestSplitRow = r.checkSplit(); -1749} -1750r.clearSplit(); -1751 } -1752 GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); -1753 builder.setRegionInfo(ProtobufUtil.toRegionInfo(info)); -1754 if (request.hasCompactionState() && request.getCompactionState()) { -1755 builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState())); -1756 } -1757 builder.setSplittable(region.isSplittable()); -1758 builder.setMergeable(region.isMergeable()); -1759 if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) { -1760 builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow)); -1761 } -1762 return builder.build(); -1763} catch (IOException ie) { -1764 throw new ServiceException(ie); -1765} -1766 } -1767 -1768 @Override -1769 @QosPriority(priority=HConstants.ADMIN_QOS) -1770 public GetRegionLoadResponse getRegionLoad(RpcController controller, -1771 GetRegionLoadRequest request) throws ServiceException { -1772 -1773Listregions; -1774if (request.hasTableName()) { -1775 TableName tableName = ProtobufUtil.toTableName(request.getTableName()); -1776 regions = regionServer.getRegions(tableName); -1777} else { -1778 regions = regionServer.getRegions(); -1779} -1780List rLoads = new ArrayList<>(regions.size()); -1781RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder(); -1782RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder(); -1783 -1784try { -1785 for (HRegion region : regions) { -1786 rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier)); -1787 } -1788} catch (IOException e) { -1789 throw new ServiceException(e); -1790} -1791GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder(); -1792builder.addAllRegionLoads(rLoads); -1793return builder.build(); -1794 } -1795 -1796 @Override -1797 @QosPriority(priority=HConstants.ADMIN_QOS) -1798 public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller, -1799ClearCompactionQueuesRequest request) throws ServiceException { -1800LOG.debug("Client=" + RpcServer.getRequestUserName().orElse(null) + "/" -1801+ RpcServer.getRemoteAddress().orElse(null) + " clear compactions queue"); -1802 ClearCompactionQueuesResponse.Builder respBuilder = ClearCompactionQueuesResponse.newBuilder(); -1803requestCount.increment(); -1804if 
(clearCompactionQueues.compareAndSet(false,true)) { -1805 try { -1806checkOpen(); -1807 regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues(); -1808for (String queueName : request.getQueueNameList()) { -1809 LOG.debug("clear " + queueName + " compaction queue"); -1810 switch (queueName) { -1811case "long": -1812 regionServer.compactSplitThread.clearLongCompactionsQueue(); -1813 break; -1814case "short": -1815 regionServer.compactSplitThread.clearShortCompactionsQueue(); +1739 boolean shouldSplit = true; +1740 if (request.hasBestSplitRow() && request.getBestSplitRow()) { +1741HRegion r = region; +1742 region.startRegionOperation(Operation.SPLIT_REGION); +1743r.forceSplit(null); +1744// Even after setting force split if split policy says no to split then we should not split. +1745shouldSplit = region.getSplitPolicy().shouldSplit() &
[20/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html index 33418d0..3647d12 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html @@ -30,293 +30,296 @@ 022import java.io.IOException; 023import java.io.RandomAccessFile; 024import java.nio.ByteBuffer; -025import java.nio.channels.ClosedChannelException; -026import java.nio.channels.FileChannel; -027import java.util.Arrays; -028import org.apache.hadoop.hbase.io.hfile.Cacheable; -029import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType; -030import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; -031import org.apache.hadoop.hbase.nio.ByteBuff; -032import org.apache.hadoop.hbase.nio.SingleByteBuff; -033import org.apache.hadoop.util.StringUtils; -034import org.apache.yetus.audience.InterfaceAudience; -035import org.slf4j.Logger; -036import org.slf4j.LoggerFactory; -037 -038import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -039import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -040 -041/** -042 * IO engine that stores data to a file on the local file system. -043 */ -044@InterfaceAudience.Private -045public class FileIOEngine implements IOEngine { -046 private static final Logger LOG = LoggerFactory.getLogger(FileIOEngine.class); -047 public static final String FILE_DELIMITER = ","; -048 private final String[] filePaths; -049 private final FileChannel[] fileChannels; -050 private final RandomAccessFile[] rafs; -051 -052 private final long sizePerFile; -053 private final long capacity; -054 -055 private FileReadAccessor readAccessor = new FileReadAccessor(); -056 private FileWriteAccessor writeAccessor = new FileWriteAccessor(); -057 -058 public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths) -059 throws IOException { -060this.sizePerFile = capacity / filePaths.length; -061this.capacity = this.sizePerFile * filePaths.length; -062this.filePaths = filePaths; -063this.fileChannels = new FileChannel[filePaths.length]; -064if (!maintainPersistence) { -065 for (String filePath : filePaths) { -066File file = new File(filePath); -067if (file.exists()) { -068 if (LOG.isDebugEnabled()) { -069LOG.debug("File " + filePath + " already exists. 
Deleting!!"); -070 } -071 file.delete(); -072 // If deletion fails still we can manage with the writes -073} -074 } -075} -076this.rafs = new RandomAccessFile[filePaths.length]; -077for (int i = 0; i < filePaths.length; i++) { -078 String filePath = filePaths[i]; -079 try { -080rafs[i] = new RandomAccessFile(filePath, "rw"); -081long totalSpace = new File(filePath).getTotalSpace(); -082if (totalSpace < sizePerFile) { -083 // The next setting length will throw exception, logging this message -084 // is just used for the detail reason of exception, -085 String msg = "Only " + StringUtils.byteDesc(totalSpace) -086 + " total space under " + filePath + ", not enough for requested " -087 + StringUtils.byteDesc(sizePerFile); -088 LOG.warn(msg); -089} -090rafs[i].setLength(sizePerFile); -091fileChannels[i] = rafs[i].getChannel(); -092LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile) -093+ ", on the path:" + filePath); -094 } catch (IOException fex) { -095LOG.error("Failed allocating cache on " + filePath, fex); -096shutdown(); -097throw fex; -098 } -099} -100 } -101 -102 @Override -103 public String toString() { -104return "ioengine=" + this.getClass().getSimpleName() + ", paths=" -105+ Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity); -106 } -107 -108 /** -109 * File IO engine is always able to support persistent storage for the cache -110 * @return true -111 */ -112 @Override -113 public boolean isPersistent() { -114return true; -115 } -116 -117 /** -118 * Transfers data from file to the given byte buffer -119 * @param offset The offset in the file where the first byte to be read -120 * @param length The length of buffer that should be allocated for reading -121 * from the file channel -122 * @return number of bytes read -123 * @throws IOException -124 */ -125 @
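Since the diff above is dense, a hedged sketch of driving FileIOEngine by hand may help. Normally BucketCache constructs this engine from hbase.bucketcache.ioengine (the class is @InterfaceAudience.Private), so treat the standalone use, the path, and the sizes as illustrative only:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.io.hfile.bucket.FileIOEngine;

public class FileIOEngineSketch {
  public static void main(String[] args) throws Exception {
    // 64 MB capacity, no persistence across restarts, one backing file.
    FileIOEngine engine =
        new FileIOEngine(64L * 1024 * 1024, false, "/tmp/bucketcache.data");
    try {
      // toString() reports engine, paths, and capacity, per the source above.
      System.out.println(engine);
      System.out.println("persistent=" + engine.isPersistent()); // always true for files
      // Raw write at offset 0; reads go through read(offset, length, deserializer).
      engine.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)), 0L);
    } finally {
      engine.shutdown(); // closes the channels and RandomAccessFiles
    }
  }
}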
[18/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html index 33418d0..3647d12 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html @@ -30,293 +30,296 @@ 022import java.io.IOException; 023import java.io.RandomAccessFile; 024import java.nio.ByteBuffer; -025import java.nio.channels.ClosedChannelException; -026import java.nio.channels.FileChannel; -027import java.util.Arrays; -028import org.apache.hadoop.hbase.io.hfile.Cacheable; -029import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType; -030import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; -031import org.apache.hadoop.hbase.nio.ByteBuff; -032import org.apache.hadoop.hbase.nio.SingleByteBuff; -033import org.apache.hadoop.util.StringUtils; -034import org.apache.yetus.audience.InterfaceAudience; -035import org.slf4j.Logger; -036import org.slf4j.LoggerFactory; -037 -038import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -039import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -040 -041/** -042 * IO engine that stores data to a file on the local file system. -043 */ -044@InterfaceAudience.Private -045public class FileIOEngine implements IOEngine { -046 private static final Logger LOG = LoggerFactory.getLogger(FileIOEngine.class); -047 public static final String FILE_DELIMITER = ","; -048 private final String[] filePaths; -049 private final FileChannel[] fileChannels; -050 private final RandomAccessFile[] rafs; -051 -052 private final long sizePerFile; -053 private final long capacity; -054 -055 private FileReadAccessor readAccessor = new FileReadAccessor(); -056 private FileWriteAccessor writeAccessor = new FileWriteAccessor(); -057 -058 public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths) -059 throws IOException { -060this.sizePerFile = capacity / filePaths.length; -061this.capacity = this.sizePerFile * filePaths.length; -062this.filePaths = filePaths; -063this.fileChannels = new FileChannel[filePaths.length]; -064if (!maintainPersistence) { -065 for (String filePath : filePaths) { -066File file = new File(filePath); -067if (file.exists()) { -068 if (LOG.isDebugEnabled()) { -069LOG.debug("File " + filePath + " already exists. 
Deleting!!"); -070 } -071 file.delete(); -072 // If deletion fails still we can manage with the writes -073} -074 } -075} -076this.rafs = new RandomAccessFile[filePaths.length]; -077for (int i = 0; i < filePaths.length; i++) { -078 String filePath = filePaths[i]; -079 try { -080rafs[i] = new RandomAccessFile(filePath, "rw"); -081long totalSpace = new File(filePath).getTotalSpace(); -082if (totalSpace < sizePerFile) { -083 // The next setting length will throw exception, logging this message -084 // is just used for the detail reason of exception, -085 String msg = "Only " + StringUtils.byteDesc(totalSpace) -086 + " total space under " + filePath + ", not enough for requested " -087 + StringUtils.byteDesc(sizePerFile); -088 LOG.warn(msg); -089} -090rafs[i].setLength(sizePerFile); -091fileChannels[i] = rafs[i].getChannel(); -092LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile) -093+ ", on the path:" + filePath); -094 } catch (IOException fex) { -095LOG.error("Failed allocating cache on " + filePath, fex); -096shutdown(); -097throw fex; -098 } -099} -100 } -101 -102 @Override -103 public String toString() { -104return "ioengine=" + this.getClass().getSimpleName() + ", paths=" -105+ Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity); -106 } -107 -108 /** -109 * File IO engine is always able to support persistent storage for the cache -110 * @return true -111 */ -112 @Override -113 public boolean isPersistent() { -114return true; -115 } -116 -117 /** -118 * Transfers data from file to the given byte buffer -119 * @param offset The offset in the file where the first byte to be read -120 * @param length The length of buffer that should be allocated for reading -121 * from the file channel -122 * @return number of bytes read -123 * @throws IOException -124 */ -125 @Override -126 public Cacheable read(long offset, int length, CacheableDeserializer
hbase-site git commit: INFRA-10751 Empty commit
Repository: hbase-site Updated Branches: refs/heads/asf-site b2e107448 -> 517ab958a INFRA-10751 Empty commit Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/517ab958 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/517ab958 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/517ab958 Branch: refs/heads/asf-site Commit: 517ab958ae0038c3e4fa588426fd551d4dc33512 Parents: b2e1074 Author: jenkins Authored: Wed Mar 28 14:49:13 2018 + Committer: jenkins Committed: Wed Mar 28 14:49:13 2018 + -- --
[07/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.html b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.html deleted file mode 100644 index 5b28049..000 --- a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.html +++ /dev/null @@ -1,714 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd";> - - - - - -TestAdminOnlyOperations (Apache HBase 3.0.0-SNAPSHOT Test API) - - - - - -var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10}; -var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; -var altColor = "altColor"; -var rowColor = "rowColor"; -var tableTab = "tableTab"; -var activeTableTab = "activeTableTab"; - - -JavaScript is disabled on your browser. - - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -Prev Class -Next Class - - -Frames -No Frames - - -All Classes - - - - - - - -Summary: -Nested | -Field | -Constr | -Method - - -Detail: -Field | -Constr | -Method - - - - - - - - -org.apache.hadoop.hbase.security.access -Class TestAdminOnlyOperations - - - -https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true"; title="class or interface in java.lang">java.lang.Object - - -org.apache.hadoop.hbase.security.access.TestAdminOnlyOperations - - - - - - - - -public class TestAdminOnlyOperations -extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true"; title="class or interface in java.lang">Object -This class tests operations in MasterRpcServices which require ADMIN access. - It doesn't test all operations which require ADMIN access, only those which get vetted within - MasterRpcServices at the point of entry itself (unlike old approach of using - hooks in AccessController). - - Sidenote: - There is one big difference between how security tests for AccessController hooks work, and how - the tests in this class for security in MasterRpcServices work. - The difference arises because of the way AC & MasterRpcServices get the user. - - In AccessController, it first checks if there is an active rpc user in ObserverContext. If not, - it uses UserProvider for current user. This *might* make sense in the context of coprocessors, - because they can be called outside the context of RPCs. - But in the context of MasterRpcServices, only one way makes sense - RPCServer.getRequestUser(). - - In AC tests, when we do FooUser.runAs on AccessController instance directly, it bypasses - the rpc framework completely, but works because UserProvider provides the correct user, i.e. - FooUser in this case. - - But this doesn't work for the tests here, so we go around by doing complete RPCs. 
- - - - - - - - - - - -Nested Class Summary - -Nested Classes - -Modifier and Type -Class and Description - - -(package private) static interface -TestAdminOnlyOperations.Action - - -static class -TestAdminOnlyOperations.DummyCpService - - - - - - - - - -Field Summary - -Fields - -Modifier and Type -Field and Description - - -static HBaseClassTestRule -CLASS_RULE - - -private static org.apache.hadoop.conf.Configuration -conf - - -private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String -GROUP_ADMIN - - -private static HBaseTestingUtility -TEST_UTIL - - -private static org.apache.hadoop.hbase.security.User -USER_ADMIN - - -private static org.apache.hadoop.hbase.security.User -USER_GROUP_ADMIN - - -private static org.apache.hadoop.hbase.security.User -USER_NON_ADMIN - - - - - - - - - -Constructor Summary - -Constructors - -Constructor and Description - - -TestAdminOnlyOperations() - - - - - - - - - -Method Summary - -All Methods Static Methods Instance Methods Concrete Methods - -Modifier and Type -Method and Description - - -private static void -enableSecurity(org.apache.hadoop.conf.Configuration conf) - - -static void -setup() - - -void -testCleanerChoreRunning() - - -void -testEnableCatalogJanitor() - - -void -testExecProcedure() - - -void -testExecProcedureWithRet() - - -void -testExecRe
[13/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html index eccc4a3..ebbde54 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html @@ -1744,1869 +1744,1872 @@ 1736 HRegion region = getRegion(request.getRegion()); 1737 RegionInfo info = region.getRegionInfo(); 1738 byte[] bestSplitRow = null; -1739 if (request.hasBestSplitRow() && request.getBestSplitRow()) { -1740HRegion r = region; -1741 region.startRegionOperation(Operation.SPLIT_REGION); -1742r.forceSplit(null); -1743bestSplitRow = r.checkSplit(); -1744// when all table data are in memstore, bestSplitRow = null -1745// try to flush region first -1746if(bestSplitRow == null) { -1747 r.flush(true); -1748 bestSplitRow = r.checkSplit(); -1749} -1750r.clearSplit(); -1751 } -1752 GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); -1753 builder.setRegionInfo(ProtobufUtil.toRegionInfo(info)); -1754 if (request.hasCompactionState() && request.getCompactionState()) { -1755 builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState())); -1756 } -1757 builder.setSplittable(region.isSplittable()); -1758 builder.setMergeable(region.isMergeable()); -1759 if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) { -1760 builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow)); -1761 } -1762 return builder.build(); -1763} catch (IOException ie) { -1764 throw new ServiceException(ie); -1765} -1766 } -1767 -1768 @Override -1769 @QosPriority(priority=HConstants.ADMIN_QOS) -1770 public GetRegionLoadResponse getRegionLoad(RpcController controller, -1771 GetRegionLoadRequest request) throws ServiceException { -1772 -1773Listregions; -1774if (request.hasTableName()) { -1775 TableName tableName = ProtobufUtil.toTableName(request.getTableName()); -1776 regions = regionServer.getRegions(tableName); -1777} else { -1778 regions = regionServer.getRegions(); -1779} -1780List rLoads = new ArrayList<>(regions.size()); -1781RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder(); -1782RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder(); -1783 -1784try { -1785 for (HRegion region : regions) { -1786 rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier)); -1787 } -1788} catch (IOException e) { -1789 throw new ServiceException(e); -1790} -1791GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder(); -1792builder.addAllRegionLoads(rLoads); -1793return builder.build(); -1794 } -1795 -1796 @Override -1797 @QosPriority(priority=HConstants.ADMIN_QOS) -1798 public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller, -1799ClearCompactionQueuesRequest request) throws ServiceException { -1800LOG.debug("Client=" + RpcServer.getRequestUserName().orElse(null) + "/" -1801+ RpcServer.getRemoteAddress().orElse(null) + " clear compactions queue"); -1802 ClearCompactionQueuesResponse.Builder respBuilder = ClearCompactionQueuesResponse.newBuilder(); 
-1803requestCount.increment(); -1804if (clearCompactionQueues.compareAndSet(false,true)) { -1805 try { -1806checkOpen(); -1807 regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues(); -1808for (String queueName : request.getQueueNameList()) { -1809 LOG.debug("clear " + queueName + " compaction queue"); -1810 switch (queueName) { -1811case "long": -1812 regionServer.compactSplitThread.clearLongCompactionsQueue(); -1813 break; -1814case "short": -1815 regionServer.compactSplitThread.clearShortCompactionsQueue(); +1739 boolean shouldSplit = true; +1740 if (request.hasBestSplitRow() && request.getBestSplitRow()) { +1741HRegion r = region; +1742 region.startRegionOperation(Operation.SPLIT_REGION); +1743r.forceSplit(null); +1744// Even after setting force split if split policy says no to split then we should not split. +1745shouldSplit =
[11/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html index eccc4a3..ebbde54 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html @@ -1744,1869 +1744,1872 @@ 1736 HRegion region = getRegion(request.getRegion()); 1737 RegionInfo info = region.getRegionInfo(); 1738 byte[] bestSplitRow = null; -1739 if (request.hasBestSplitRow() && request.getBestSplitRow()) { -1740HRegion r = region; -1741 region.startRegionOperation(Operation.SPLIT_REGION); -1742r.forceSplit(null); -1743bestSplitRow = r.checkSplit(); -1744// when all table data are in memstore, bestSplitRow = null -1745// try to flush region first -1746if(bestSplitRow == null) { -1747 r.flush(true); -1748 bestSplitRow = r.checkSplit(); -1749} -1750r.clearSplit(); -1751 } -1752 GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); -1753 builder.setRegionInfo(ProtobufUtil.toRegionInfo(info)); -1754 if (request.hasCompactionState() && request.getCompactionState()) { -1755 builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState())); -1756 } -1757 builder.setSplittable(region.isSplittable()); -1758 builder.setMergeable(region.isMergeable()); -1759 if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) { -1760 builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow)); -1761 } -1762 return builder.build(); -1763} catch (IOException ie) { -1764 throw new ServiceException(ie); -1765} -1766 } -1767 -1768 @Override -1769 @QosPriority(priority=HConstants.ADMIN_QOS) -1770 public GetRegionLoadResponse getRegionLoad(RpcController controller, -1771 GetRegionLoadRequest request) throws ServiceException { -1772 -1773Listregions; -1774if (request.hasTableName()) { -1775 TableName tableName = ProtobufUtil.toTableName(request.getTableName()); -1776 regions = regionServer.getRegions(tableName); -1777} else { -1778 regions = regionServer.getRegions(); -1779} -1780List rLoads = new ArrayList<>(regions.size()); -1781RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder(); -1782RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder(); -1783 -1784try { -1785 for (HRegion region : regions) { -1786 rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier)); -1787 } -1788} catch (IOException e) { -1789 throw new ServiceException(e); -1790} -1791GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder(); -1792builder.addAllRegionLoads(rLoads); -1793return builder.build(); -1794 } -1795 -1796 @Override -1797 @QosPriority(priority=HConstants.ADMIN_QOS) -1798 public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller, -1799ClearCompactionQueuesRequest request) throws ServiceException { -1800LOG.debug("Client=" + RpcServer.getRequestUserName().orElse(null) + "/" -1801+ RpcServer.getRemoteAddress().orElse(null) + " clear compactions queue"); -1802 ClearCompactionQueuesResponse.Builder respBuilder = ClearCompactionQueuesResponse.newBuilder(); -1803requestCount.increment(); -1804if (clearCompactionQueues.compareAndSet(false,true)) { -1805 try { -1806checkOpen(); -1807 
regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues(); -1808for (String queueName : request.getQueueNameList()) { -1809 LOG.debug("clear " + queueName + " compaction queue"); -1810 switch (queueName) { -1811case "long": -1812 regionServer.compactSplitThread.clearLongCompactionsQueue(); -1813 break; -1814case "short": -1815 regionServer.compactSplitThread.clearShortCompactionsQueue(); +1739 boolean shouldSplit = true; +1740 if (request.hasBestSplitRow() && request.getBestSplitRow()) { +1741HRegion r = region; +1742 region.startRegionOperation(Operation.SPLIT_REGION); +1743r.forceSplit(null); +1744// Even after setting force split if split policy says no to split then we should not split. +1745shouldSplit = region.getSplitPolicy().shouldSplit() && !info.isMetaRegion(); +1746bestSplitRow = r.checkSplit(); +1747// when all table da
[19/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html index 33418d0..3647d12 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html @@ -30,293 +30,296 @@ 022import java.io.IOException; 023import java.io.RandomAccessFile; 024import java.nio.ByteBuffer; -025import java.nio.channels.ClosedChannelException; -026import java.nio.channels.FileChannel; -027import java.util.Arrays; -028import org.apache.hadoop.hbase.io.hfile.Cacheable; -029import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType; -030import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; -031import org.apache.hadoop.hbase.nio.ByteBuff; -032import org.apache.hadoop.hbase.nio.SingleByteBuff; -033import org.apache.hadoop.util.StringUtils; -034import org.apache.yetus.audience.InterfaceAudience; -035import org.slf4j.Logger; -036import org.slf4j.LoggerFactory; -037 -038import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -039import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -040 -041/** -042 * IO engine that stores data to a file on the local file system. -043 */ -044@InterfaceAudience.Private -045public class FileIOEngine implements IOEngine { -046 private static final Logger LOG = LoggerFactory.getLogger(FileIOEngine.class); -047 public static final String FILE_DELIMITER = ","; -048 private final String[] filePaths; -049 private final FileChannel[] fileChannels; -050 private final RandomAccessFile[] rafs; -051 -052 private final long sizePerFile; -053 private final long capacity; -054 -055 private FileReadAccessor readAccessor = new FileReadAccessor(); -056 private FileWriteAccessor writeAccessor = new FileWriteAccessor(); -057 -058 public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths) -059 throws IOException { -060this.sizePerFile = capacity / filePaths.length; -061this.capacity = this.sizePerFile * filePaths.length; -062this.filePaths = filePaths; -063this.fileChannels = new FileChannel[filePaths.length]; -064if (!maintainPersistence) { -065 for (String filePath : filePaths) { -066File file = new File(filePath); -067if (file.exists()) { -068 if (LOG.isDebugEnabled()) { -069LOG.debug("File " + filePath + " already exists. 
Deleting!!"); -070 } -071 file.delete(); -072 // If deletion fails still we can manage with the writes -073} -074 } -075} -076this.rafs = new RandomAccessFile[filePaths.length]; -077for (int i = 0; i < filePaths.length; i++) { -078 String filePath = filePaths[i]; -079 try { -080rafs[i] = new RandomAccessFile(filePath, "rw"); -081long totalSpace = new File(filePath).getTotalSpace(); -082if (totalSpace < sizePerFile) { -083 // The next setting length will throw exception, logging this message -084 // is just used for the detail reason of exception, -085 String msg = "Only " + StringUtils.byteDesc(totalSpace) -086 + " total space under " + filePath + ", not enough for requested " -087 + StringUtils.byteDesc(sizePerFile); -088 LOG.warn(msg); -089} -090rafs[i].setLength(sizePerFile); -091fileChannels[i] = rafs[i].getChannel(); -092LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile) -093+ ", on the path:" + filePath); -094 } catch (IOException fex) { -095LOG.error("Failed allocating cache on " + filePath, fex); -096shutdown(); -097throw fex; -098 } -099} -100 } -101 -102 @Override -103 public String toString() { -104return "ioengine=" + this.getClass().getSimpleName() + ", paths=" -105+ Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity); -106 } -107 -108 /** -109 * File IO engine is always able to support persistent storage for the cache -110 * @return true -111 */ -112 @Override -113 public boolean isPersistent() { -114return true; -115 } -116 -117 /** -118 * Transfers data from file to the given byte buffer -119 * @param offset The offset in the file where the first byte to be read -120 * @param length The length of buffer that should be allocated for reading -121 * from the file channel -122 * @return number of bytes read -123 * @throws IOException -124 */ -1
[22/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/checkstyle.rss -- diff --git a/checkstyle.rss b/checkstyle.rss index ba8ce97..2359974 100644 --- a/checkstyle.rss +++ b/checkstyle.rss @@ -26,7 +26,7 @@ under the License. ©2007 - 2018 The Apache Software Foundation File: 3595, - Errors: 15918, + Errors: 15919, Warnings: 0, Infos: 0 @@ -31784,6 +31784,20 @@ under the License. + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.security.access.TestRpcAccessChecks.java";>org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java + + + 0 + + + 0 + + + 0 + + + + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.util.CompressionTest.java";>org/apache/hadoop/hbase/util/CompressionTest.java @@ -38308,20 +38322,6 @@ under the License. - http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.security.access.TestAdminOnlyOperations.java";>org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.java - - - 0 - - - 0 - - - 0 - - - - http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.TestReversibleScanners.java";>org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java @@ -46675,7 +46675,7 @@ under the License. 0 - 7 + 8 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/coc.html -- diff --git a/coc.html b/coc.html index bbd2f25..77f8b3c 100644 --- a/coc.html +++ b/coc.html @@ -7,7 +7,7 @@ - + Apache HBase – Code of Conduct Policy @@ -178,9 +178,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -368,7 +365,7 @@ email to mailto:priv...@hbase.apache.org";>the priv https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/dependencies.html -- diff --git a/dependencies.html b/dependencies.html index 982e932..8b07515 100644 --- a/dependencies.html +++ b/dependencies.html @@ -7,7 +7,7 @@ - + Apache HBase – Project Dependencies @@ -176,9 +176,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -433,7 +430,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/dependency-convergence.html -- diff --git a/dependency-convergence.html b/dependency-convergence.html index 625156f..102c74b 100644 --- a/dependency-convergence.html +++ b/dependency-convergence.html @@ -7,7 +7,7 @@ - + Apache HBase – Reactor Dependency Convergence @@ -176,9 +176,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -1098,7 +1095,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apach
[10/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/export_control.html -- diff --git a/export_control.html b/export_control.html index 01931b7..6d39492 100644 --- a/export_control.html +++ b/export_control.html @@ -7,7 +7,7 @@ - + Apache HBase – Export Control @@ -178,9 +178,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -324,7 +321,7 @@ for more details. https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/index.html -- diff --git a/index.html b/index.html index d57637f..6329e26 100644 --- a/index.html +++ b/index.html @@ -7,7 +7,7 @@ - + Apache HBase – Apache HBase™ Home @@ -176,9 +176,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -428,7 +425,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/integration.html -- diff --git a/integration.html b/integration.html index c937ff0..b1323d5 100644 --- a/integration.html +++ b/integration.html @@ -7,7 +7,7 @@ - + Apache HBase – CI Management @@ -176,9 +176,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -284,7 +281,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/issue-tracking.html -- diff --git a/issue-tracking.html b/issue-tracking.html index 6b7569b..8a00cfc 100644 --- a/issue-tracking.html +++ b/issue-tracking.html @@ -7,7 +7,7 @@ - + Apache HBase – Issue Management @@ -176,9 +176,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -281,7 +278,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/license.html -- diff --git a/license.html b/license.html index c53089f..8f16188 100644 --- a/license.html +++ b/license.html @@ -7,7 +7,7 @@ - + Apache HBase – Project Licenses @@ -176,9 +176,6 @@ Metrics -HBase on Windows - - Cluster replication @@ -484,7 +481,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-03-27 + Last Published: 2018-03-28 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/mail-lists.html -- diff --git a/mail-lists.html b/mail-lists.html index b5e4b0f..15c4e8a 100644 --- a/mail-lists.html +++ b/mail-lists.html @@ -7,7 +7,7 @@ - + Apache HBase – Project Mailing Lists @@ -176,9 +176,6 @@ Metrics -HBase on Windows - -
[04/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAdmin1.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAdmin1.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAdmin1.html index d6fa453..bb14853 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAdmin1.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAdmin1.html @@ -39,1367 +39,1397 @@ 031import java.util.Map; 032import java.util.concurrent.TimeUnit; 033import java.util.concurrent.atomic.AtomicInteger; -034import org.apache.hadoop.hbase.HBaseClassTestRule; -035import org.apache.hadoop.hbase.HBaseTestingUtility; -036import org.apache.hadoop.hbase.HColumnDescriptor; -037import org.apache.hadoop.hbase.HConstants; -038import org.apache.hadoop.hbase.HRegionLocation; -039import org.apache.hadoop.hbase.HTableDescriptor; -040import org.apache.hadoop.hbase.InvalidFamilyOperationException; -041import org.apache.hadoop.hbase.MetaTableAccessor; -042import org.apache.hadoop.hbase.ServerName; -043import org.apache.hadoop.hbase.TableName; -044import org.apache.hadoop.hbase.TableNotDisabledException; -045import org.apache.hadoop.hbase.TableNotEnabledException; -046import org.apache.hadoop.hbase.TableNotFoundException; -047import org.apache.hadoop.hbase.exceptions.MergeRegionException; -048import org.apache.hadoop.hbase.master.LoadBalancer; -049import org.apache.hadoop.hbase.regionserver.HRegion; -050import org.apache.hadoop.hbase.regionserver.HStore; -051import org.apache.hadoop.hbase.regionserver.HStoreFile; -052import org.apache.hadoop.hbase.testclassification.ClientTests; -053import org.apache.hadoop.hbase.testclassification.LargeTests; -054import org.apache.hadoop.hbase.util.Bytes; -055import org.apache.hadoop.hbase.util.FSUtils; -056import org.apache.hadoop.hbase.util.Pair; -057import org.junit.After; -058import org.junit.AfterClass; -059import org.junit.Before; -060import org.junit.BeforeClass; -061import org.junit.ClassRule; -062import org.junit.Rule; -063import org.junit.Test; -064import org.junit.experimental.categories.Category; -065import org.junit.rules.TestName; -066import org.slf4j.Logger; -067import org.slf4j.LoggerFactory; -068 -069import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -070import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; -071 -072/** -073 * Class to test HBaseAdmin. -074 * Spins up the minicluster once at test start and then takes it down afterward. -075 * Add any testing of HBaseAdmin functionality here. 
-076 */ -077@Category({LargeTests.class, ClientTests.class}) -078public class TestAdmin1 { -079 -080 @ClassRule -081 public static final HBaseClassTestRule CLASS_RULE = -082 HBaseClassTestRule.forClass(TestAdmin1.class); -083 -084 private static final Logger LOG = LoggerFactory.getLogger(TestAdmin1.class); -085 private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); -086 private Admin admin; -087 -088 @Rule -089 public TestName name = new TestName(); -090 -091 @BeforeClass -092 public static void setUpBeforeClass() throws Exception { -093 TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); -094 TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); -095 TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6); -096 TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true); -097TEST_UTIL.startMiniCluster(3); -098 } -099 -100 @AfterClass -101 public static void tearDownAfterClass() throws Exception { -102TEST_UTIL.shutdownMiniCluster(); -103 } -104 -105 @Before -106 public void setUp() throws Exception { -107this.admin = TEST_UTIL.getAdmin(); -108 } -109 -110 @After -111 public void tearDown() throws Exception { -112for (HTableDescriptor htd : this.admin.listTables()) { -113 TEST_UTIL.deleteTable(htd.getTableName()); -114} -115 } -116 -117 @Test -118 public void testSplitFlushCompactUnknownTable() throws InterruptedException { -119final TableName unknowntable = TableName.valueOf(name.getMethodName()); -120Exception exception = null; -121try { -122 this.admin.compact(unknowntable); -123} catch (IOException e) { -124 exception = e; -125} -126assertTrue(exception instanceof TableNotFoundException); -127 -128exception = null; -129try { -130 this.admin.flush(unknowntable); -131} catch (IOException e) { -132 exception = e; -133} -134assertTrue(exception instanceof TableNotFoundException); -135 -136exception = null; -137try { -138 this.admin.split(unknowntable); -139} catch (IOException e) { -140 exception = e; -141} -142assertTrue(exception instanceof TableN
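The TestAdmin1 excerpt above leans on HBaseTestingUtility's in-process mini-cluster, the same utility the book's case-study link covers. A minimal standalone sketch of that pattern; the table name and column family are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(); // in-process HDFS + ZK + HBase; takes roughly 20-30 seconds
    try {
      // Create a throwaway table, then exercise Admin/Table APIs against it,
      // much as TestAdmin1's tests do between setUp() and tearDown().
      Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("f"));
      System.out.println("created " + table.getName());
    } finally {
      util.shutdownMiniCluster(); // mirrors tearDownAfterClass() above
    }
  }
}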
[01/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
Repository: hbase-site Updated Branches: refs/heads/asf-site 53df288a2 -> b2e107448 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html new file mode 100644 index 000..127be96 --- /dev/null +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.html @@ -0,0 +1,434 @@ +http://www.w3.org/TR/html4/loose.dtd";> + + +Source code + + + + +001 +002/** +003 * Licensed to the Apache Software Foundation (ASF) under one +004 * or more contributor license agreements. See the NOTICE file +005 * distributed with this work for additional information +006 * regarding copyright ownership. The ASF licenses this file +007 * to you under the Apache License, Version 2.0 (the +008 * "License"); you may not use this file except in compliance +009 * with the License. You may obtain a copy of the License at +010 * +011 * http://www.apache.org/licenses/LICENSE-2.0 +012 * +013 * Unless required by applicable law or agreed to in writing, software +014 * distributed under the License is distributed on an "AS IS" BASIS, +015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +016 * See the License for the specific language governing permissions and +017 * limitations under the License. +018 */ +019package org.apache.hadoop.hbase.security.access; +020 +021import static org.apache.hadoop.hbase.AuthUtil.toGroupEntry; +022import static org.junit.Assert.assertArrayEquals; +023import static org.junit.Assert.assertFalse; +024import static org.junit.Assert.assertTrue; +025import static org.junit.Assert.fail; +026import static org.mockito.Mockito.mock; +027 +028import com.google.protobuf.Service; +029import com.google.protobuf.ServiceException; +030import java.io.IOException; +031import java.security.PrivilegedExceptionAction; +032import java.util.Collections; +033import java.util.HashMap; +034import org.apache.hadoop.conf.Configuration; +035import org.apache.hadoop.hbase.Cell; +036import org.apache.hadoop.hbase.CellUtil; +037import org.apache.hadoop.hbase.HBaseClassTestRule; +038import org.apache.hadoop.hbase.HBaseTestingUtility; +039import org.apache.hadoop.hbase.ServerName; +040import org.apache.hadoop.hbase.TableName; +041import org.apache.hadoop.hbase.client.Admin; +042import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +043import org.apache.hadoop.hbase.client.Connection; +044import org.apache.hadoop.hbase.client.ConnectionFactory; +045import org.apache.hadoop.hbase.client.Get; +046import org.apache.hadoop.hbase.client.Put; +047import org.apache.hadoop.hbase.client.Result; +048import org.apache.hadoop.hbase.client.Table; +049import org.apache.hadoop.hbase.client.TableDescriptor; +050import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +051import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +052import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; +053import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; +054import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos; +055import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; +056import org.apache.hadoop.hbase.security.AccessDeniedException; +057import org.apache.hadoop.hbase.security.User; +058import 
org.apache.hadoop.hbase.testclassification.MediumTests; +059import org.apache.hadoop.hbase.testclassification.SecurityTests; +060import org.apache.hadoop.hbase.util.Bytes; +061import org.junit.BeforeClass; +062import org.junit.ClassRule; +063import org.junit.Rule; +064import org.junit.Test; +065import org.junit.experimental.categories.Category; +066import org.junit.rules.TestName; +067 +068/** +069 * This class tests operations in MasterRpcServices which require ADMIN access. +070 * It doesn't test all operations which require ADMIN access, only those which get vetted within +071 * MasterRpcServices at the point of entry itself (unlike old approach of using +072 * hooks in AccessController). +073 * +074 * Sidenote: +075 * There is one big difference between how security tests for AccessController hooks work, and how +076 * the tests in this class for security in MasterRpcServices work. +077 * The difference arises because of the way AC & MasterRpcServices get the user. +078 * +079 * In AccessController, it first checks if there is an active rpc user in ObserverContext. If not, +080 * it uses UserProvider for current user. This *might* make sense in the context of coprocessors, +081 * because they can be called outside the context of RPCs. +082 * But in the context of MasterRpcServices, only one way makes sense - RPCServer.getRequestUser().
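The sidenote in the class javadoc is the crux of the new test: because MasterRpcServices resolves the caller through RPCServer.getRequestUser(), a test identity only takes effect if it rides on a real RPC, not just the local thread. A sketch of that shape follows; the user name and the choice of stopMaster() as the ADMIN-gated operation are illustrative assumptions, while User, Connection, ConnectionFactory, Admin, AccessDeniedException and PrivilegedExceptionAction all appear in the imports above.

// Run as a non-admin user; the Connection opened inside runAs() makes
// every RPC carry that user, which is what MasterRpcServices checks.
User nonAdmin = User.createUserForTesting(conf, "non_admin", new String[0]);
nonAdmin.runAs((PrivilegedExceptionAction<Void>) () -> {
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
    admin.stopMaster();  // illustrative ADMIN-gated call
    fail("Expected AccessDeniedException for non-admin user");
  } catch (AccessDeniedException expected) {
    // denied at the MasterRpcServices entry point, as intended
  }
  return null;
});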
[03/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.Action.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.Action.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.Action.html deleted file mode 100644 index af4dcf4..000 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAdminOnlyOperations.Action.html +++ /dev/null @@ -1,340 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd";> - - -Source code - - - - -001 -002/** -003 * Licensed to the Apache Software Foundation (ASF) under one -004 * or more contributor license agreements. See the NOTICE file -005 * distributed with this work for additional information -006 * regarding copyright ownership. The ASF licenses this file -007 * to you under the Apache License, Version 2.0 (the -008 * "License"); you may not use this file except in compliance -009 * with the License. You may obtain a copy of the License at -010 * -011 * http://www.apache.org/licenses/LICENSE-2.0 -012 * -013 * Unless required by applicable law or agreed to in writing, software -014 * distributed under the License is distributed on an "AS IS" BASIS, -015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -016 * See the License for the specific language governing permissions and -017 * limitations under the License. -018 */ -019package org.apache.hadoop.hbase.security.access; -020 -021import static org.apache.hadoop.hbase.AuthUtil.toGroupEntry; -022import static org.junit.Assert.assertTrue; -023import static org.junit.Assert.fail; -024import static org.mockito.Mockito.mock; -025 -026import com.google.protobuf.Service; -027import com.google.protobuf.ServiceException; -028import java.io.IOException; -029import java.security.PrivilegedExceptionAction; -030import java.util.Collections; -031import java.util.HashMap; -032import org.apache.hadoop.conf.Configuration; -033import org.apache.hadoop.hbase.HBaseClassTestRule; -034import org.apache.hadoop.hbase.HBaseTestingUtility; -035import org.apache.hadoop.hbase.ServerName; -036import org.apache.hadoop.hbase.TableName; -037import org.apache.hadoop.hbase.client.Admin; -038import org.apache.hadoop.hbase.client.Connection; -039import org.apache.hadoop.hbase.client.ConnectionFactory; -040import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -041import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; -042import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; -043import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos; -044import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; -045import org.apache.hadoop.hbase.security.AccessDeniedException; -046import org.apache.hadoop.hbase.security.User; -047import org.apache.hadoop.hbase.testclassification.MediumTests; -048import org.apache.hadoop.hbase.testclassification.SecurityTests; -049import org.junit.BeforeClass; -050import org.junit.ClassRule; -051import org.junit.Test; -052import org.junit.experimental.categories.Category; -053 -054/** -055 * This class tests operations in MasterRpcServices which require ADMIN access. -056 * It doesn't test all operations which require ADMIN access, only those which get vetted within -057 * MasterRpcServices at the point of entry itself (unlike old approach of using -058 * hooks in AccessController). 
-059 * -060 * Sidenote: -061 * There is one big difference between how security tests for AccessController hooks work, and how -062 * the tests in this class for security in MasterRpcServices work. -063 * The difference arises because of the way AC & MasterRpcServices get the user. -064 * -065 * In AccessController, it first checks if there is an active rpc user in ObserverContext. If not, -066 * it uses UserProvider for current user. This *might* make sense in the context of coprocessors, -067 * because they can be called outside the context of RPCs. -068 * But in the context of MasterRpcServices, only one way makes sense - RPCServer.getRequestUser(). -069 * -070 * In AC tests, when we do FooUser.runAs on AccessController instance directly, it bypasses -071 * the rpc framework completely, but works because UserProvider provides the correct user, i.e. -072 * FooUser in this case. -073 * -074 * But this doesn't work for the tests here, so we go around by doing complete RPCs. -075 */ -076@Category({SecurityTests.class, MediumTests.class}) -077public class TestAdminOnlyOperations { -078 @ClassRule -079 public static final HBaseClassTestRule CLASS_RULE = -080 HBaseClassTestRule.forClass(TestAdminOnlyOperations.class); -081 -082 private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); -083 private static Configuration conf; -084
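Both the deleted file and its TestRpcAccessChecks replacement share the standard HBase test-class frame visible in the hunks: a test category, the mandatory HBaseClassTestRule, and a minicluster so the tests can issue complete RPCs. Stripped down, that frame is roughly:

@Category({SecurityTests.class, MediumTests.class})
public class TestAdminOnlyOperations {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestAdminOnlyOperations.class);

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setup() throws Exception {
    // Real cluster, not mocks: the point is to exercise the RPC path.
    TEST_UTIL.startMiniCluster();
  }
}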
hbase git commit: HBASE-16848 Usage for show_peer_tableCFs command doesn't include peer
Repository: hbase Updated Branches: refs/heads/branch-1.3 3791d2e20 -> 42f5b2723 HBASE-16848 Usage for show_peer_tableCFs command doesn't include peer Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/42f5b272 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/42f5b272 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/42f5b272 Branch: refs/heads/branch-1.3 Commit: 42f5b2723f0035861ffc007f00feef3f8ec0d21e Parents: 3791d2e Author: Peter Somogyi Authored: Thu Mar 22 16:22:38 2018 +0100 Committer: Peter Somogyi Committed: Wed Mar 28 10:13:09 2018 +0200 -- hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/42f5b272/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb b/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb index 3ce3d06..037630f 100644 --- a/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb +++ b/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb @@ -25,7 +25,7 @@ module Shell return <<-EOF Show replicable table-cf config for the specified peer. -hbase> show_peer_tableCFs +hbase> show_peer_tableCFs '2' EOF end
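The one-line change matters because the command is meaningless without a peer id, which the usage example now shows. On the Java side of branch-1, the same information is reachable through ReplicationAdmin, roughly as sketched below (getPeerTableCFs is assumed from the 1.x client API; ReplicationAdmin implements Closeable, so try-with-resources applies):

// Print the replicable table-cf config for peer "2", mirroring
// what `show_peer_tableCFs '2'` does in the shell.
try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
  String tableCFs = replicationAdmin.getPeerTableCFs("2");
  System.out.println(tableCFs);
}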
hbase git commit: HBASE-20290 Typo in enable_table_replication error message
Repository: hbase Updated Branches: refs/heads/branch-1.2 130f83246 -> 0a761c0ae HBASE-20290 Typo in enable_table_replication error message Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0a761c0a Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0a761c0a Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0a761c0a Branch: refs/heads/branch-1.2 Commit: 0a761c0ae20a8757fed31661598096a07ecf0749 Parents: 130f832 Author: Gabor Bota Authored: Tue Mar 27 18:43:16 2018 +0200 Committer: Peter Somogyi Committed: Wed Mar 28 09:01:56 2018 +0200 -- .../apache/hadoop/hbase/client/replication/ReplicationAdmin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/0a761c0a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 04cb5bf..7d870f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -622,7 +622,7 @@ public class ReplicationAdmin implements Closeable { } else if (!peerHtd.equals(htd)) { throw new IllegalArgumentException("Table " + tableName.getNameAsString() + " exists in peer cluster " + repPeer.getId() -+ ", but the table descriptors are not same when comapred with source cluster." ++ ", but the table descriptors are not same when compared with source cluster." + " Thus can not enable the table's replication switch."); } }
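For context, the corrected message lives in a guard that compares the table descriptor fetched from the peer cluster with the source cluster's before flipping the replication switch. Condensed from the surrounding hunk (admin and peerAdmin are stand-in names for the two clusters' handles):

HTableDescriptor htd = admin.getTableDescriptor(tableName);          // source cluster
HTableDescriptor peerHtd = peerAdmin.getTableDescriptor(tableName);  // peer cluster
if (peerHtd != null && !peerHtd.equals(htd)) {
  // Same table name on both sides but a different schema: refuse to
  // enable replication rather than silently replicate into a mismatch.
  throw new IllegalArgumentException("Table " + tableName.getNameAsString()
      + " exists in peer cluster " + repPeer.getId()
      + ", but the table descriptors are not same when compared with source cluster."
      + " Thus can not enable the table's replication switch.");
}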