[hbase] branch branch-1.4 updated: HBASE-24032 [Addendum] use equals instead of ==
This is an automated email from the ASF dual-hosted git repository. reidchan pushed a commit to branch branch-1.4 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-1.4 by this push: new 60990b9 HBASE-24032 [Addendum] use equals instead of == 60990b9 is described below commit 60990b9c8e2a303b05aa403da42fe98927c521a2 Author: Reid Chan AuthorDate: Tue Mar 24 14:57:58 2020 +0800 HBASE-24032 [Addendum] use equals instead of == --- .../main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 5bbc40e..3e7a0eb 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -473,7 +473,7 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService groupName = RSGroupInfo.DEFAULT_GROUP; } -if (groupName == RSGroupInfo.DEFAULT_GROUP) { +if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { TableName tableName = desc.getTableName(); groupName = script.getRSGroup( tableName.getNamespaceAsString(),
[hbase] branch branch-2 updated: HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations
This is an automated email from the ASF dual-hosted git repository. reidchan pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 304edcf HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations 304edcf is described below commit 304edcf1afb3f6ccedd9f36d654a55a4d55b6fea Author: Reid Chan AuthorDate: Tue Mar 24 20:12:00 2020 +0800 HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations --- .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 55 + .../hbase/rsgroup/TestRSGroupMappingScript.java| 127 + 2 files changed, 182 insertions(+) diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 727e860..b869e81 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -30,6 +30,7 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; @@ -83,6 +84,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.Permission.Action; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,6 +107,48 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { /** Provider for mapping 
principal names to Users */ private UserProvider userProvider; + /** Get rsgroup table mapping script */ + private RSGroupMappingScript script; + + // Package visibility for testing + static class RSGroupMappingScript { + +static final String RS_GROUP_MAPPING_SCRIPT = "hbase.rsgroup.table.mapping.script"; +static final String RS_GROUP_MAPPING_SCRIPT_TIMEOUT = + "hbase.rsgroup.table.mapping.script.timeout"; + +private ShellCommandExecutor rsgroupMappingScript; + +RSGroupMappingScript(Configuration conf) { + String script = conf.get(RS_GROUP_MAPPING_SCRIPT); + if (script == null || script.isEmpty()) { +return; + } + + rsgroupMappingScript = new ShellCommandExecutor( +new String[] { script, "", "" }, null, null, +conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds + ); +} + +String getRSGroup(String namespace, String tablename) { + if (rsgroupMappingScript == null) { +return RSGroupInfo.DEFAULT_GROUP; + } + String[] exec = rsgroupMappingScript.getExecString(); + exec[1] = namespace; + exec[2] = tablename; + try { +rsgroupMappingScript.execute(); + } catch (IOException e) { +LOG.error(e.getMessage() + " placing back to default rsgroup"); +return RSGroupInfo.DEFAULT_GROUP; + } + return rsgroupMappingScript.getOutput().trim(); +} + + } + @Override public void start(CoprocessorEnvironment env) throws IOException { if (!(env instanceof HasMasterServices)) { @@ -123,6 +167,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { // set the user-provider. 
this.userProvider = UserProvider.instantiate(env.getConfiguration()); +this.script = new RSGroupMappingScript(env.getConfiguration()); } @Override @@ -434,6 +479,16 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { if (groupName == null) { groupName = RSGroupInfo.DEFAULT_GROUP; } + +if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { + TableName tableName = desc.getTableName(); + groupName = script.getRSGroup( +tableName.getNamespaceAsString(), +tableName.getQualifierAsString() + ); + LOG.info("rsgroup for " + tableName + " is " + groupName); +} + RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); if (rsGroupInfo == null) { throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's " diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java new file mode 100644 index 000..df2f89b --- /dev/null +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java @
[hbase] branch branch-2.2 updated: HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations
This is an automated email from the ASF dual-hosted git repository. reidchan pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.2 by this push: new 2cad05a HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations 2cad05a is described below commit 2cad05a30f1a78e5244725f1bf369806525d3222 Author: Reid Chan AuthorDate: Tue Mar 24 20:12:00 2020 +0800 HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations Conflicts: hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java --- .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 55 + .../hbase/rsgroup/TestRSGroupMappingScript.java| 127 + 2 files changed, 182 insertions(+) diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 5f8dadb..5e676e0 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -30,6 +30,7 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; @@ -84,6 +85,7 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -106,6 +108,48 
@@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { /** Provider for mapping principal names to Users */ private UserProvider userProvider; + /** Get rsgroup table mapping script */ + private RSGroupMappingScript script; + + // Package visibility for testing + static class RSGroupMappingScript { + +static final String RS_GROUP_MAPPING_SCRIPT = "hbase.rsgroup.table.mapping.script"; +static final String RS_GROUP_MAPPING_SCRIPT_TIMEOUT = + "hbase.rsgroup.table.mapping.script.timeout"; + +private ShellCommandExecutor rsgroupMappingScript; + +RSGroupMappingScript(Configuration conf) { + String script = conf.get(RS_GROUP_MAPPING_SCRIPT); + if (script == null || script.isEmpty()) { +return; + } + + rsgroupMappingScript = new ShellCommandExecutor( +new String[] { script, "", "" }, null, null, +conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds + ); +} + +String getRSGroup(String namespace, String tablename) { + if (rsgroupMappingScript == null) { +return RSGroupInfo.DEFAULT_GROUP; + } + String[] exec = rsgroupMappingScript.getExecString(); + exec[1] = namespace; + exec[2] = tablename; + try { +rsgroupMappingScript.execute(); + } catch (IOException e) { +LOG.error(e.getMessage() + " placing back to default rsgroup"); +return RSGroupInfo.DEFAULT_GROUP; + } + return rsgroupMappingScript.getOutput().trim(); +} + + } + @Override public void start(CoprocessorEnvironment env) throws IOException { if (!(env instanceof HasMasterServices)) { @@ -125,6 +169,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { // set the user-provider. 
this.userProvider = UserProvider.instantiate(env.getConfiguration()); +this.script = new RSGroupMappingScript(env.getConfiguration()); } @Override @@ -437,6 +482,16 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { if (groupName == null) { groupName = RSGroupInfo.DEFAULT_GROUP; } + +if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { + TableName tableName = desc.getTableName(); + groupName = script.getRSGroup( +tableName.getNamespaceAsString(), +tableName.getQualifierAsString() + ); + LOG.info("rsgroup for " + tableName + " is " + groupName); +} + RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); if (rsGroupInfo == null) { throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's " diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java new file mode 100644 index 000
[hbase] branch branch-2.3 updated: HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations
This is an automated email from the ASF dual-hosted git repository. reidchan pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 733a1c0 HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations 733a1c0 is described below commit 733a1c06a652703e6796a6043447a0c214df81f7 Author: Reid Chan AuthorDate: Tue Mar 24 20:12:00 2020 +0800 HBASE-24032 [RSGroup] Assign created tables to respective rsgroup automatically instead of manual operations --- .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 55 + .../hbase/rsgroup/TestRSGroupMappingScript.java| 127 + 2 files changed, 182 insertions(+) diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index 727e860..b869e81 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -30,6 +30,7 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; @@ -83,6 +84,7 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.AccessChecker; import org.apache.hadoop.hbase.security.access.Permission.Action; +import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,6 +107,48 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { /** Provider for 
mapping principal names to Users */ private UserProvider userProvider; + /** Get rsgroup table mapping script */ + private RSGroupMappingScript script; + + // Package visibility for testing + static class RSGroupMappingScript { + +static final String RS_GROUP_MAPPING_SCRIPT = "hbase.rsgroup.table.mapping.script"; +static final String RS_GROUP_MAPPING_SCRIPT_TIMEOUT = + "hbase.rsgroup.table.mapping.script.timeout"; + +private ShellCommandExecutor rsgroupMappingScript; + +RSGroupMappingScript(Configuration conf) { + String script = conf.get(RS_GROUP_MAPPING_SCRIPT); + if (script == null || script.isEmpty()) { +return; + } + + rsgroupMappingScript = new ShellCommandExecutor( +new String[] { script, "", "" }, null, null, +conf.getLong(RS_GROUP_MAPPING_SCRIPT_TIMEOUT, 5000) // 5 seconds + ); +} + +String getRSGroup(String namespace, String tablename) { + if (rsgroupMappingScript == null) { +return RSGroupInfo.DEFAULT_GROUP; + } + String[] exec = rsgroupMappingScript.getExecString(); + exec[1] = namespace; + exec[2] = tablename; + try { +rsgroupMappingScript.execute(); + } catch (IOException e) { +LOG.error(e.getMessage() + " placing back to default rsgroup"); +return RSGroupInfo.DEFAULT_GROUP; + } + return rsgroupMappingScript.getOutput().trim(); +} + + } + @Override public void start(CoprocessorEnvironment env) throws IOException { if (!(env instanceof HasMasterServices)) { @@ -123,6 +167,7 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { // set the user-provider. 
this.userProvider = UserProvider.instantiate(env.getConfiguration()); +this.script = new RSGroupMappingScript(env.getConfiguration()); } @Override @@ -434,6 +479,16 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { if (groupName == null) { groupName = RSGroupInfo.DEFAULT_GROUP; } + +if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { + TableName tableName = desc.getTableName(); + groupName = script.getRSGroup( +tableName.getNamespaceAsString(), +tableName.getQualifierAsString() + ); + LOG.info("rsgroup for " + tableName + " is " + groupName); +} + RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); if (rsGroupInfo == null) { throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's " diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.java new file mode 100644 index 000..df2f89b --- /dev/null +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupMappingScript.ja
[hbase-site] branch asf-site updated: INFRA-10751 Empty commit
This is an automated email from the ASF dual-hosted git repository. git-site-role pushed a commit to branch asf-site in repository https://gitbox.apache.org/repos/asf/hbase-site.git The following commit(s) were added to refs/heads/asf-site by this push: new a55c8bf INFRA-10751 Empty commit a55c8bf is described below commit a55c8bff156b84da4882d29d815ace6358d4eeaf Author: jenkins AuthorDate: Tue Mar 24 15:14:16 2020 + INFRA-10751 Empty commit
[hbase] branch branch-2 updated: HBASE-23980 Use enforcer plugin to print JVM info in maven output
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 7d89a98 HBASE-23980 Use enforcer plugin to print JVM info in maven output 7d89a98 is described below commit 7d89a9870b0d3489545f96c01766a97df661a789 Author: Nick Dimiduk AuthorDate: Thu Mar 19 16:52:51 2020 -0700 HBASE-23980 Use enforcer plugin to print JVM info in maven output Does what it says on the tin. Bound to `initialize` phase so that it runs early in lifecycle. Uses `<inherited>false</inherited>` so that the plugin will run only for the base pom's reactor stage and not for any children. Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel --- pom.xml | 8 1 file changed, 8 insertions(+) diff --git a/pom.xml b/pom.xml index 7e7bad8..00f323f 100755 --- a/pom.xml +++ b/pom.xml @@ -877,6 +877,14 @@ maven-enforcer-plugin +display-info +initialize + + display-info + +false + + hadoop-profile-min-maven-min-java-banned-xerces enforce
[hbase] branch branch-2.3 updated: HBASE-23980 Use enforcer plugin to print JVM info in maven output
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 035d7cb HBASE-23980 Use enforcer plugin to print JVM info in maven output 035d7cb is described below commit 035d7cb3607ed7fd9d5e3e32473de7e841f2bf22 Author: Nick Dimiduk AuthorDate: Thu Mar 19 16:52:51 2020 -0700 HBASE-23980 Use enforcer plugin to print JVM info in maven output Does what it says on the tin. Bound to `initialize` phase so that it runs early in lifecycle. Uses `<inherited>false</inherited>` so that the plugin will run only for the base pom's reactor stage and not for any children. Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel --- pom.xml | 8 1 file changed, 8 insertions(+) diff --git a/pom.xml b/pom.xml index 0929e6e..6086238 100755 --- a/pom.xml +++ b/pom.xml @@ -877,6 +877,14 @@ maven-enforcer-plugin +display-info +initialize + + display-info + +false + + hadoop-profile-min-maven-min-java-banned-xerces enforce
[hbase] branch branch-1 updated: HBASE-23980 Use enforcer plugin to print JVM info in maven output
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-1 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-1 by this push: new 61e17d7 HBASE-23980 Use enforcer plugin to print JVM info in maven output 61e17d7 is described below commit 61e17d78d17f2301bcbe85ae2e2945803dc3b92c Author: Nick Dimiduk AuthorDate: Thu Mar 19 16:52:51 2020 -0700 HBASE-23980 Use enforcer plugin to print JVM info in maven output Does what it says on the tin. Bound to `initialize` phase so that it runs early in lifecycle. Uses `<inherited>false</inherited>` so that the plugin will run only for the base pom's reactor stage and not for any children. Signed-off-by: Viraj Jasani Signed-off-by: Jan Hentschel --- pom.xml | 8 1 file changed, 8 insertions(+) diff --git a/pom.xml b/pom.xml index 1688e59..afc6d2e 100644 --- a/pom.xml +++ b/pom.xml @@ -858,6 +858,14 @@ +display-info +initialize + + display-info + +false + + hadoop-profile-min-maven-min-java-banned-xerces enforce
[hbase] branch master updated: HBASE-24002 shadedjars check does not propagate --hadoop-profile
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new aa53493 HBASE-24002 shadedjars check does not propagate --hadoop-profile aa53493 is described below commit aa53493ae83852057dcd9c9380d5953a79fc719e Author: Nick Dimiduk AuthorDate: Tue Mar 17 12:32:48 2020 -0700 HBASE-24002 shadedjars check does not propagate --hadoop-profile This implementation is almost surely incorrect. Personality initialization parses the `--hadoop-profile` argument and sets `HADOOP_PROFILE`. That value is then used to build an `extras` value that is passed along to module initialization. I'm guessing that the `extras` value need to be honored down in the shadedjars module. I'm not clear on how to make that work (need to study the interfaces at play here), so taking the more ham-handed approach of referring to `HADOOP_PROFILE`. I'm not sure if this will even work, or if it will only work because the `foo_yetus.sh` scripts happen to use a variable of the same name. 
Signed-off-by: Jan Hentschel Signed-off-by: stack --- dev-support/hbase-personality.sh | 14 +- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 50a7080..3fe9fd9 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -464,13 +464,17 @@ function shadedjars_rebuild start_clock + local -a maven_args=('clean' 'verify' '-fae' '--batch-mode' +'-pl' 'hbase-shaded/hbase-shaded-check-invariants' '-am' +'-Dtest=NoUnitTests' '-DHBasePatchProcess' '-Prelease' +'-Dmaven.javadoc.skip=true' '-Dcheckstyle.skip=true' '-Dspotbugs.skip=true') + if [[ -n "${HADOOP_PROFILE}" ]]; then +maven_args+=("-Dhadoop.profile=${HADOOP_PROFILE}") + fi + # disabled because "maven_executor" needs to return both command and args # shellcheck disable=2046 - echo_and_redirect "${logfile}" \ -$(maven_executor) clean verify -fae --batch-mode \ - -pl hbase-shaded/hbase-shaded-check-invariants -am \ - -Dtest=NoUnitTests -DHBasePatchProcess -Prelease \ - -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true + echo_and_redirect "${logfile}" $(maven_executor) "${maven_args[@]}" count=$(${GREP} -c '\[ERROR\]' "${logfile}") if [[ ${count} -gt 0 ]]; then
[hbase] branch branch-2.3 updated: HBASE-24002 shadedjars check does not propagate --hadoop-profile
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 050383e HBASE-24002 shadedjars check does not propagate --hadoop-profile 050383e is described below commit 050383e96d09e6430ef6b0873a7345d23fe5e7a8 Author: Nick Dimiduk AuthorDate: Tue Mar 17 12:32:48 2020 -0700 HBASE-24002 shadedjars check does not propagate --hadoop-profile This implementation is almost surely incorrect. Personality initialization parses the `--hadoop-profile` argument and sets `HADOOP_PROFILE`. That value is then used to build an `extras` value that is passed along to module initialization. I'm guessing that the `extras` value need to be honored down in the shadedjars module. I'm not clear on how to make that work (need to study the interfaces at play here), so taking the more ham-handed approach of referring to `HADOOP_PROFILE`. I'm not sure if this will even work, or if it will only work because the `foo_yetus.sh` scripts happen to use a variable of the same name. 
Signed-off-by: Jan Hentschel Signed-off-by: stack --- dev-support/hbase-personality.sh | 14 +- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh index 0388515..72f0b05 100755 --- a/dev-support/hbase-personality.sh +++ b/dev-support/hbase-personality.sh @@ -463,13 +463,17 @@ function shadedjars_rebuild start_clock + local -a maven_args=('clean' 'verify' '-fae' '--batch-mode' +'-pl' 'hbase-shaded/hbase-shaded-check-invariants' '-am' +'-Dtest=NoUnitTests' '-DHBasePatchProcess' '-Prelease' +'-Dmaven.javadoc.skip=true' '-Dcheckstyle.skip=true' '-Dspotbugs.skip=true') + if [[ -n "${HADOOP_PROFILE}" ]]; then +maven_args+=("-Dhadoop.profile=${HADOOP_PROFILE}") + fi + # disabled because "maven_executor" needs to return both command and args # shellcheck disable=2046 - echo_and_redirect "${logfile}" \ -$(maven_executor) clean verify -fae --batch-mode \ - -pl hbase-shaded/hbase-shaded-check-invariants -am \ - -Dtest=NoUnitTests -DHBasePatchProcess -Prelease \ - -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true + echo_and_redirect "${logfile}" $(maven_executor) "${maven_args[@]}" count=$(${GREP} -c '\[ERROR\]' "${logfile}") if [[ ${count} -gt 0 ]]; then
[hbase] branch branch-2 updated (7d89a98 -> aba0737)
This is an automated email from the ASF dual-hosted git repository. ndimiduk pushed a change to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git. from 7d89a98 HBASE-23980 Use enforcer plugin to print JVM info in maven output add aba0737 HBASE-24002 shadedjars check does not propagate --hadoop-profile No new revisions were added by this update. Summary of changes: dev-support/hbase-personality.sh | 14 +- 1 file changed, 9 insertions(+), 5 deletions(-)
[hbase] branch master updated (aa53493 -> 9a212ee)
This is an automated email from the ASF dual-hosted git repository. esteban pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git. from aa53493 HBASE-24002 shadedjars check does not propagate --hadoop-profile add 9a212ee HBASE-24041 [regression] Increase RESTServer buffer size back to 64k (#1339) No new revisions were added by this update. Summary of changes: hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java | 4 1 file changed, 4 insertions(+)
[hbase] branch branch-2 updated: HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new d718912 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug d718912 is described below commit d7189127fbc858521f8cf11f2ae5c7c623630762 Author: stack AuthorDate: Tue Mar 24 14:36:09 2020 -0700 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java Edit of log about archiving that shows in middle of a table create; try to make it less disorientating. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java Loosen assert. Compaction may have produced a single file only. Allow for this. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java Make this test less furious given it is inline w/ a bunch of unit tests. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java Add debug hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java Add wait on quota table to show up before moving forward; otherwise, attempt at quota setting fails. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Debug hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java Remove asserts that expected regions to still have a presence in fs after merge when a catalogjanitor may have cleaned up parent dirs. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java Catch exception on way out and log it rather than let it fail test. 
hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java Wait on acl table before proceeding. --- .../hbase/master/procedure/DeleteTableProcedure.java | 5 +++-- .../hadoop/hbase/client/TestAsyncRegionAdminApi.java | 7 +-- .../hbase/client/TestAsyncTableGetMultiThreaded.java | 2 +- .../apache/hadoop/hbase/client/TestFromClientSide3.java| 2 +- .../hbase/quotas/TestQuotaObserverChoreRegionReports.java | 14 -- .../org/apache/hadoop/hbase/regionserver/TestHRegion.java | 2 ++ .../regionserver/TestRegionMergeTransactionOnCluster.java | 4 ++-- .../hadoop/hbase/regionserver/TestRegionReplicas.java | 7 ++- .../access/TestSnapshotScannerHDFSAclController.java | 2 ++ 9 files changed, 34 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index c792316..6c862af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -319,7 +318,9 @@ public class DeleteTableProcedure .collect(Collectors.toList()); HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tempTableDir, regionDirList); - LOG.debug("Table '{}' archived!", tableName); + if (!regionDirList.isEmpty()) { +LOG.debug("Archived {} regions", tableName); + } } // Archive mob data diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index 
33778a7..38f19c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -429,8 +429,11 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { int countAfterSingleFamily = countStoreFilesInFamily(regions, family); assertTrue(countAfter < countBefore); if (!singleFamily) { - if (expectedState == CompactionState.MAJOR) assertTrue(families.length == countAfter); - else assertTrue(families.length < countAfter); + if (expectedState == CompactionState.MAJOR) { +assertEquals(families.length, countAfter); + } else { +assertTrue(families.length <= countAfter); + }
[hbase] branch branch-2.3 updated: HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 75a66f1 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug 75a66f1 is described below commit 75a66f15d5f6edfd0237a62f579e6a652005d0cb Author: stack AuthorDate: Tue Mar 24 14:36:09 2020 -0700 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java Edit of log about archiving that shows in middle of a table create; try to make it less disorientating. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java Loosen assert. Compaction may have produced a single file only. Allow for this. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java Make this test less furious given it is inline w/ a bunch of unit tests. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java Add debug hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java Add wait on quota table to show up before moving forward; otherwise, attempt at quota setting fails. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Debug hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java Remove asserts that expected regions to still have a presence in fs after merge when a catalogjanitor may have cleaned up parent dirs. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java Catch exception on way out and log it rather than let it fail test. 
hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java Wait on acl table before proceeding. --- .../hbase/master/procedure/DeleteTableProcedure.java | 5 +++-- .../hadoop/hbase/client/TestAsyncRegionAdminApi.java | 7 +-- .../hbase/client/TestAsyncTableGetMultiThreaded.java | 2 +- .../apache/hadoop/hbase/client/TestFromClientSide3.java| 2 +- .../hbase/quotas/TestQuotaObserverChoreRegionReports.java | 14 -- .../org/apache/hadoop/hbase/regionserver/TestHRegion.java | 2 ++ .../regionserver/TestRegionMergeTransactionOnCluster.java | 4 ++-- .../hadoop/hbase/regionserver/TestRegionReplicas.java | 7 ++- .../access/TestSnapshotScannerHDFSAclController.java | 2 ++ 9 files changed, 34 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index c792316..6c862af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -319,7 +318,9 @@ public class DeleteTableProcedure .collect(Collectors.toList()); HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tempTableDir, regionDirList); - LOG.debug("Table '{}' archived!", tableName); + if (!regionDirList.isEmpty()) { +LOG.debug("Archived {} regions", tableName); + } } // Archive mob data diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index 
33778a7..38f19c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -429,8 +429,11 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { int countAfterSingleFamily = countStoreFilesInFamily(regions, family); assertTrue(countAfter < countBefore); if (!singleFamily) { - if (expectedState == CompactionState.MAJOR) assertTrue(families.length == countAfter); - else assertTrue(families.length < countAfter); + if (expectedState == CompactionState.MAJOR) { +assertEquals(families.length, countAfter); + } else { +assertTrue(families.length <= countAfter); + }
[hbase] branch master updated: HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new 2ca0a10 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug 2ca0a10 is described below commit 2ca0a105bc098a8e8b7df64522a91dfe642c5ce4 Author: stack AuthorDate: Tue Mar 24 14:36:09 2020 -0700 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java Edit of log about archiving that shows in middle of a table create; try to make it less disorientating. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java Loosen assert. Compaction may have produced a single file only. Allow for this. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java Make this test less furious given it is inline w/ a bunch of unit tests. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java Add debug hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java Add wait on quota table to show up before moving forward; otherwise, attempt at quota setting fails. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Debug hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java Remove asserts that expected regions to still have a presence in fs after merge when a catalogjanitor may have cleaned up parent dirs. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java Catch exception on way out and log it rather than let it fail test. 
hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java Wait on acl table before proceeding. --- .../hbase/master/procedure/DeleteTableProcedure.java | 5 +++-- .../hadoop/hbase/client/TestAsyncRegionAdminApi.java | 7 +-- .../hbase/client/TestAsyncTableGetMultiThreaded.java | 2 +- .../apache/hadoop/hbase/client/TestFromClientSide3.java| 2 +- .../hbase/quotas/TestQuotaObserverChoreRegionReports.java | 14 -- .../org/apache/hadoop/hbase/regionserver/TestHRegion.java | 2 ++ .../regionserver/TestRegionMergeTransactionOnCluster.java | 4 ++-- .../hadoop/hbase/regionserver/TestRegionReplicas.java | 7 ++- .../access/TestSnapshotScannerHDFSAclController.java | 2 ++ 9 files changed, 34 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index c792316..6c862af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -319,7 +318,9 @@ public class DeleteTableProcedure .collect(Collectors.toList()); HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tempTableDir, regionDirList); - LOG.debug("Table '{}' archived!", tableName); + if (!regionDirList.isEmpty()) { +LOG.debug("Archived {} regions", tableName); + } } // Archive mob data diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index 
f5d823c..bc05a82 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -429,8 +429,11 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { int countAfterSingleFamily = countStoreFilesInFamily(regions, family); assertTrue(countAfter < countBefore); if (!singleFamily) { - if (expectedState == CompactionState.MAJOR) assertTrue(families.length == countAfter); - else assertTrue(families.length < countAfter); + if (expectedState == CompactionState.MAJOR) { +assertEquals(families.length, countAfter); + } else { +assertTrue(families.length <= countAfter); + } }
[hbase] branch branch-2.2 updated: HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.2 by this push: new cd848d4 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug cd848d4 is described below commit cd848d4a7f996a74d29b99710ea352b72855 Author: stack AuthorDate: Tue Mar 24 14:36:09 2020 -0700 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java Edit of log about archiving that shows in middle of a table create; try to make it less disorientating. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java Loosen assert. Compaction may have produced a single file only. Allow for this. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java Make this test less furious given it is inline w/ a bunch of unit tests. hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java Add debug hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaObserverChoreRegionReports.java Add wait on quota table to show up before moving forward; otherwise, attempt at quota setting fails. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Debug hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java Remove asserts that expected regions to still have a presence in fs after merge when a catalogjanitor may have cleaned up parent dirs. hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java Catch exception on way out and log it rather than let it fail test. 
hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestSnapshotScannerHDFSAclController.java Wait on acl table before proceeding. --- .../hbase/master/procedure/DeleteTableProcedure.java | 5 +++-- .../hadoop/hbase/client/TestAsyncRegionAdminApi.java | 7 +-- .../hbase/client/TestAsyncTableGetMultiThreaded.java | 2 +- .../apache/hadoop/hbase/client/TestFromClientSide3.java| 2 +- .../hbase/quotas/TestQuotaObserverChoreRegionReports.java | 14 -- .../org/apache/hadoop/hbase/regionserver/TestHRegion.java | 2 ++ .../regionserver/TestRegionMergeTransactionOnCluster.java | 4 ++-- .../hadoop/hbase/regionserver/TestRegionReplicas.java | 7 ++- 8 files changed, 32 insertions(+), 11 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index c792316..6c862af 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -319,7 +318,9 @@ public class DeleteTableProcedure .collect(Collectors.toList()); HFileArchiver.archiveRegions(env.getMasterConfiguration(), fs, mfs.getRootDir(), tempTableDir, regionDirList); - LOG.debug("Table '{}' archived!", tableName); + if (!regionDirList.isEmpty()) { +LOG.debug("Archived {} regions", tableName); + } } // Archive mob data diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index aeff96e..3031693 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -429,8 +429,11 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { int countAfterSingleFamily = countStoreFilesInFamily(regions, family); assertTrue(countAfter < countBefore); if (!singleFamily) { - if (expectedState == CompactionState.MAJOR) assertTrue(families.length == countAfter); - else assertTrue(families.length < countAfter); + if (expectedState == CompactionState.MAJOR) { +assertEquals(families.length, countAfter); + } else { +assertTrue(families.length <= countAfter); + } } else { int singleFamDiff = countBeforeSingleFamily -
[hbase] branch master updated (2ca0a10 -> 84977ee)
This is an automated email from the ASF dual-hosted git repository. stack pushed a change to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git. from 2ca0a10 HBASE-24043 [Flakey Tests] TestAsyncRegionAdminApi, TestRegionMergeTransactionOnCluster fixes and debug add 84977ee HBASE-8868. add metric to report client shortcircuit reads. (#1334) No new revisions were added by this update. Summary of changes: .../regionserver/MetricsRegionServerSource.java| 11 .../regionserver/MetricsRegionServerWrapper.java | 20 +++ .../MetricsRegionServerSourceImpl.java | 12 + .../hadoop/hbase/io/FSDataInputStreamWrapper.java | 63 +- .../MetricsRegionServerWrapperImpl.java| 21 .../MetricsRegionServerWrapperStub.java| 20 +++ .../regionserver/TestRegionServerMetrics.java | 19 +++ src/main/asciidoc/_chapters/schema_design.adoc | 7 +++ 8 files changed, 172 insertions(+), 1 deletion(-)
[hbase] branch branch-2 updated: HBASE-8868. add metric to report client shortcircuit reads. (#1334)
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 8521207 HBASE-8868. add metric to report client shortcircuit reads. (#1334) 8521207 is described below commit 8521207be4c4645f0f92318f535d2e0e7f9dea94 Author: Wei-Chiu Chuang AuthorDate: Tue Mar 24 15:30:08 2020 -0700 HBASE-8868. add metric to report client shortcircuit reads. (#1334) Signed-off-by: stack --- .../regionserver/MetricsRegionServerSource.java| 11 .../regionserver/MetricsRegionServerWrapper.java | 20 +++ .../MetricsRegionServerSourceImpl.java | 12 + .../hadoop/hbase/io/FSDataInputStreamWrapper.java | 63 +- .../MetricsRegionServerWrapperImpl.java| 21 .../MetricsRegionServerWrapperStub.java| 20 +++ .../regionserver/TestRegionServerMetrics.java | 19 +++ src/main/asciidoc/_chapters/schema_design.adoc | 7 +++ 8 files changed, 172 insertions(+), 1 deletion(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 8f8a12d..958495a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -474,6 +474,17 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String HEDGED_READ_WINS_DESC = "The number of times we started a hedged read and a hedged read won"; + String TOTAL_BYTES_READ = "totalBytesRead"; + String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS"; + String LOCAL_BYTES_READ = "localBytesRead"; + String LOCAL_BYTES_READ_DESC = + "The number of bytes read from the local HDFS DataNode"; + String SHORTCIRCUIT_BYTES_READ = 
"shortCircuitBytesRead"; + String SHORTCIRCUIT_BYTES_READ_DESC = "The number of bytes read through HDFS short circuit read"; + String ZEROCOPY_BYTES_READ = "zeroCopyBytesRead"; + String ZEROCOPY_BYTES_READ_DESC = + "The number of bytes read through HDFS zero copy"; + String BLOCKED_REQUESTS_COUNT = "blockedRequestCount"; String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " + "larger than blockingMemStoreSize"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index e616753..2ed6ab4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -443,6 +443,26 @@ public interface MetricsRegionServerWrapper { long getHedgedReadWins(); /** + * @return Number of total bytes read from HDFS. + */ + long getTotalBytesRead(); + + /** + * @return Number of bytes read from the local HDFS DataNode. + */ + long getLocalBytesRead(); + + /** + * @return Number of bytes read locally through HDFS short circuit. + */ + long getShortCircuitBytesRead(); + + /** + * @return Number of bytes read locally through HDFS zero copy. 
+ */ + long getZeroCopyBytesRead(); + + /** * @return Count of requests blocked because the memstore size is larger than blockingMemStoreSize */ long getBlockedRequestsCount(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 55aa65e..4af8bec 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -506,6 +506,18 @@ public class MetricsRegionServerSourceImpl .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), rsWrap.getPercentFileLocalSecondaryRegions()) +.addGauge(Interns.info(TOTAL_BYTES_READ, +TOTAL_BYTES_READ_DESC), +rsWrap.getTotalBytesRead()) +.addGauge(Interns.info(LOCAL_BYTES_READ, +LOCAL_BYTES_READ_DESC), +rsWrap.getLocalBytesRead()) +.addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, +SHORTCIRCUIT_BYTES_READ_DESC), +r
[hbase] branch branch-2.3 updated: HBASE-8868. add metric to report client shortcircuit reads. (#1334)
This is an automated email from the ASF dual-hosted git repository. stack pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 4ec1909 HBASE-8868. add metric to report client shortcircuit reads. (#1334) 4ec1909 is described below commit 4ec19096dfaa4ff5ac1e8504f44d8d0be6e2eb2c Author: Wei-Chiu Chuang AuthorDate: Tue Mar 24 15:30:08 2020 -0700 HBASE-8868. add metric to report client shortcircuit reads. (#1334) Signed-off-by: stack --- .../regionserver/MetricsRegionServerSource.java| 11 .../regionserver/MetricsRegionServerWrapper.java | 20 +++ .../MetricsRegionServerSourceImpl.java | 12 + .../hadoop/hbase/io/FSDataInputStreamWrapper.java | 63 +- .../MetricsRegionServerWrapperImpl.java| 21 .../MetricsRegionServerWrapperStub.java| 20 +++ .../regionserver/TestRegionServerMetrics.java | 19 +++ src/main/asciidoc/_chapters/schema_design.adoc | 7 +++ 8 files changed, 172 insertions(+), 1 deletion(-) diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 8f8a12d..958495a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -474,6 +474,17 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String HEDGED_READ_WINS_DESC = "The number of times we started a hedged read and a hedged read won"; + String TOTAL_BYTES_READ = "totalBytesRead"; + String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS"; + String LOCAL_BYTES_READ = "localBytesRead"; + String LOCAL_BYTES_READ_DESC = + "The number of bytes read from the local HDFS DataNode"; + String SHORTCIRCUIT_BYTES_READ = 
"shortCircuitBytesRead"; + String SHORTCIRCUIT_BYTES_READ_DESC = "The number of bytes read through HDFS short circuit read"; + String ZEROCOPY_BYTES_READ = "zeroCopyBytesRead"; + String ZEROCOPY_BYTES_READ_DESC = + "The number of bytes read through HDFS zero copy"; + String BLOCKED_REQUESTS_COUNT = "blockedRequestCount"; String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " + "larger than blockingMemStoreSize"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index e616753..2ed6ab4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -443,6 +443,26 @@ public interface MetricsRegionServerWrapper { long getHedgedReadWins(); /** + * @return Number of total bytes read from HDFS. + */ + long getTotalBytesRead(); + + /** + * @return Number of bytes read from the local HDFS DataNode. + */ + long getLocalBytesRead(); + + /** + * @return Number of bytes read locally through HDFS short circuit. + */ + long getShortCircuitBytesRead(); + + /** + * @return Number of bytes read locally through HDFS zero copy. 
+ */ + long getZeroCopyBytesRead(); + + /** * @return Count of requests blocked because the memstore size is larger than blockingMemStoreSize */ long getBlockedRequestsCount(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 55aa65e..4af8bec 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -506,6 +506,18 @@ public class MetricsRegionServerSourceImpl .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), rsWrap.getPercentFileLocalSecondaryRegions()) +.addGauge(Interns.info(TOTAL_BYTES_READ, +TOTAL_BYTES_READ_DESC), +rsWrap.getTotalBytesRead()) +.addGauge(Interns.info(LOCAL_BYTES_READ, +LOCAL_BYTES_READ_DESC), +rsWrap.getLocalBytesRead()) +.addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, +SHORTCIRCUIT_BYTES_READ_DESC), +
[hbase] branch master updated: HBASE-23983 Fixed Spotbugs complaint in RegionStates related to ignored return value
This is an automated email from the ASF dual-hosted git repository. janh pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new aa966e3 HBASE-23983 Fixed Spotbugs complaint in RegionStates related to ignored return value aa966e3 is described below commit aa966e354aa02f5572fc06427dfa0617b4e0123b Author: Jan Hentschel AuthorDate: Wed Mar 25 00:02:54 2020 +0100 HBASE-23983 Fixed Spotbugs complaint in RegionStates related to ignored return value Signed-off-by: Duo Zhang --- .../org/apache/hadoop/hbase/master/assignment/RegionStates.java | 6 +- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java index 7c0f6f8..3bb3c4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -129,7 +129,11 @@ public class RegionStates { synchronized (regionsMapLock) { RegionStateNode node = regionsMap.computeIfAbsent(regionInfo.getRegionName(), key -> new RegionStateNode(regionInfo, regionInTransition)); - encodedRegionsMap.putIfAbsent(regionInfo.getEncodedName(), node); + + if (encodedRegionsMap.get(regionInfo.getEncodedName()) != node) { +encodedRegionsMap.put(regionInfo.getEncodedName(), node); + } + return node; } }
[hbase] branch master updated: HBASE-24037 Add ut for root dir and wal root dir are different (#1336)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/master by this push: new eed730e HBASE-24037 Add ut for root dir and wal root dir are different (#1336) eed730e is described below commit eed730e492420687096e7398c3a73097a66053a5 Author: Guanghao Zhang AuthorDate: Wed Mar 25 10:53:14 2020 +0800 HBASE-24037 Add ut for root dir and wal root dir are different (#1336) Signed-off-by: stack --- .../hadoop/hbase/wal/TestWALSplitToHFile.java | 91 +++--- 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java index 5d762dc..52df813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java @@ -105,6 +105,7 @@ public class TestWALSplitToHFile { private WALFactory wals; private static final byte[] ROW = Bytes.toBytes("row"); + private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static final byte[] VALUE1 = Bytes.toBytes("value1"); private static final byte[] VALUE2 = Bytes.toBytes("value2"); private static final int countPerFamily = 10; @@ -178,6 +179,12 @@ public class TestWALSplitToHFile { return wal; } + private WAL createWAL(FileSystem fs, Path hbaseRootDir, String logName) throws IOException { +FSHLog wal = new FSHLog(fs, hbaseRootDir, logName, this.conf); +wal.init(); +return wal; + } + private Pair setupTableAndRegion() throws IOException { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); final TableDescriptor td = createBasic3FamilyTD(tableName); @@ -190,29 +197,62 @@ public class TestWALSplitToHFile { return new Pair<>(td, ri); } + private void writeData(TableDescriptor td, HRegion 
region) throws IOException { +final long timestamp = this.ee.currentTime(); +for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { + region.put(new Put(ROW).addColumn(cfd.getName(), QUALIFIER, timestamp, VALUE1)); +} + } + @Test - public void testCorruptRecoveredHFile() throws Exception { + public void testDifferentRootDirAndWALRootDir() throws Exception { +// Change wal root dir and reset the configuration +Path walRootDir = UTIL.createWALRootDir(); +this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); + +FileSystem walFs = FSUtils.getWALFileSystem(this.conf); +this.oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); +String serverName = +ServerName.valueOf(TEST_NAME.getMethodName() + "-manual", 16010, System.currentTimeMillis()) +.toString(); +this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName); +this.logDir = new Path(walRootDir, logName); +this.wals = new WALFactory(conf, TEST_NAME.getMethodName()); + Pair pair = setupTableAndRegion(); TableDescriptor td = pair.getFirst(); RegionInfo ri = pair.getSecond(); -WAL wal = createWAL(this.conf, rootDir, logName); +WAL wal = createWAL(walFs, walRootDir, logName); HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal); -final long timestamp = this.ee.currentTime(); -// Write data and flush -for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { - region.put(new Put(ROW).addColumn(cfd.getName(), Bytes.toBytes("x"), timestamp, VALUE1)); -} -region.flush(true); +writeData(td, region); -// Now assert edits made it in. 
-Result result1 = region.get(new Get(ROW)); -assertEquals(td.getColumnFamilies().length, result1.size()); +// Now close the region without flush +region.close(true); +wal.shutdown(); +// split the log +WALSplitter.split(walRootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals); + +WAL wal2 = createWAL(walFs, walRootDir, logName); +HRegion region2 = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal2); +Result result2 = region2.get(new Get(ROW)); +assertEquals(td.getColumnFamilies().length, result2.size()); for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { - assertTrue(Bytes.equals(VALUE1, result1.getValue(cfd.getName(), Bytes.toBytes("x"; + assertTrue(Bytes.equals(VALUE1, result2.getValue(cfd.getName(), QUALIFIER))); } + } -// Now close the region + @Test + public void testCorruptRecoveredHFile() throws Exception { +Pair pair = setupTableAndRegion(); +TableDescriptor td = pair.getFirst(); +RegionInfo ri = pair.getSecond(); + +WAL wal = createWAL(this.conf, rootDir, logName); +HRegion region = HRegion.openHReg
[hbase] branch branch-2 updated: HBASE-24037 Add ut for root dir and wal root dir are different (#1336)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 41baf71 HBASE-24037 Add ut for root dir and wal root dir are different (#1336) 41baf71 is described below commit 41baf711ecb7092c4fa966290e794d9c893683dd Author: Guanghao Zhang AuthorDate: Wed Mar 25 10:53:14 2020 +0800 HBASE-24037 Add ut for root dir and wal root dir are different (#1336) Signed-off-by: stack --- .../hadoop/hbase/wal/TestWALSplitToHFile.java | 91 +++--- 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java index 5d762dc..52df813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java @@ -105,6 +105,7 @@ public class TestWALSplitToHFile { private WALFactory wals; private static final byte[] ROW = Bytes.toBytes("row"); + private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static final byte[] VALUE1 = Bytes.toBytes("value1"); private static final byte[] VALUE2 = Bytes.toBytes("value2"); private static final int countPerFamily = 10; @@ -178,6 +179,12 @@ public class TestWALSplitToHFile { return wal; } + private WAL createWAL(FileSystem fs, Path hbaseRootDir, String logName) throws IOException { +FSHLog wal = new FSHLog(fs, hbaseRootDir, logName, this.conf); +wal.init(); +return wal; + } + private Pair setupTableAndRegion() throws IOException { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); final TableDescriptor td = createBasic3FamilyTD(tableName); @@ -190,29 +197,62 @@ public class TestWALSplitToHFile { return new Pair<>(td, ri); } + private void writeData(TableDescriptor td, 
HRegion region) throws IOException { +final long timestamp = this.ee.currentTime(); +for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { + region.put(new Put(ROW).addColumn(cfd.getName(), QUALIFIER, timestamp, VALUE1)); +} + } + @Test - public void testCorruptRecoveredHFile() throws Exception { + public void testDifferentRootDirAndWALRootDir() throws Exception { +// Change wal root dir and reset the configuration +Path walRootDir = UTIL.createWALRootDir(); +this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); + +FileSystem walFs = FSUtils.getWALFileSystem(this.conf); +this.oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); +String serverName = +ServerName.valueOf(TEST_NAME.getMethodName() + "-manual", 16010, System.currentTimeMillis()) +.toString(); +this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName); +this.logDir = new Path(walRootDir, logName); +this.wals = new WALFactory(conf, TEST_NAME.getMethodName()); + Pair pair = setupTableAndRegion(); TableDescriptor td = pair.getFirst(); RegionInfo ri = pair.getSecond(); -WAL wal = createWAL(this.conf, rootDir, logName); +WAL wal = createWAL(walFs, walRootDir, logName); HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal); -final long timestamp = this.ee.currentTime(); -// Write data and flush -for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { - region.put(new Put(ROW).addColumn(cfd.getName(), Bytes.toBytes("x"), timestamp, VALUE1)); -} -region.flush(true); +writeData(td, region); -// Now assert edits made it in. 
-Result result1 = region.get(new Get(ROW)); -assertEquals(td.getColumnFamilies().length, result1.size()); +// Now close the region without flush +region.close(true); +wal.shutdown(); +// split the log +WALSplitter.split(walRootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals); + +WAL wal2 = createWAL(walFs, walRootDir, logName); +HRegion region2 = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal2); +Result result2 = region2.get(new Get(ROW)); +assertEquals(td.getColumnFamilies().length, result2.size()); for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { - assertTrue(Bytes.equals(VALUE1, result1.getValue(cfd.getName(), Bytes.toBytes("x"; + assertTrue(Bytes.equals(VALUE1, result2.getValue(cfd.getName(), QUALIFIER))); } + } -// Now close the region + @Test + public void testCorruptRecoveredHFile() throws Exception { +Pair pair = setupTableAndRegion(); +TableDescriptor td = pair.getFirst(); +RegionInfo ri = pair.getSecond(); + +WAL wal = createWAL(this.conf, rootDir, logName); +HRegion region = HRegion.open
[hbase] branch branch-2.3 updated: HBASE-24037 Add ut for root dir and wal root dir are different (#1336)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 88ac6db HBASE-24037 Add ut for root dir and wal root dir are different (#1336) 88ac6db is described below commit 88ac6db032170239fef5162b1f97909fb0728a94 Author: Guanghao Zhang AuthorDate: Wed Mar 25 10:53:14 2020 +0800 HBASE-24037 Add ut for root dir and wal root dir are different (#1336) Signed-off-by: stack --- .../hadoop/hbase/wal/TestWALSplitToHFile.java | 91 +++--- 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java index 5d762dc..52df813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplitToHFile.java @@ -105,6 +105,7 @@ public class TestWALSplitToHFile { private WALFactory wals; private static final byte[] ROW = Bytes.toBytes("row"); + private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static final byte[] VALUE1 = Bytes.toBytes("value1"); private static final byte[] VALUE2 = Bytes.toBytes("value2"); private static final int countPerFamily = 10; @@ -178,6 +179,12 @@ public class TestWALSplitToHFile { return wal; } + private WAL createWAL(FileSystem fs, Path hbaseRootDir, String logName) throws IOException { +FSHLog wal = new FSHLog(fs, hbaseRootDir, logName, this.conf); +wal.init(); +return wal; + } + private Pair setupTableAndRegion() throws IOException { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); final TableDescriptor td = createBasic3FamilyTD(tableName); @@ -190,29 +197,62 @@ public class TestWALSplitToHFile { return new Pair<>(td, ri); } + private void writeData(TableDescriptor td, 
HRegion region) throws IOException { +final long timestamp = this.ee.currentTime(); +for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { + region.put(new Put(ROW).addColumn(cfd.getName(), QUALIFIER, timestamp, VALUE1)); +} + } + @Test - public void testCorruptRecoveredHFile() throws Exception { + public void testDifferentRootDirAndWALRootDir() throws Exception { +// Change wal root dir and reset the configuration +Path walRootDir = UTIL.createWALRootDir(); +this.conf = HBaseConfiguration.create(UTIL.getConfiguration()); + +FileSystem walFs = FSUtils.getWALFileSystem(this.conf); +this.oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); +String serverName = +ServerName.valueOf(TEST_NAME.getMethodName() + "-manual", 16010, System.currentTimeMillis()) +.toString(); +this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName); +this.logDir = new Path(walRootDir, logName); +this.wals = new WALFactory(conf, TEST_NAME.getMethodName()); + Pair pair = setupTableAndRegion(); TableDescriptor td = pair.getFirst(); RegionInfo ri = pair.getSecond(); -WAL wal = createWAL(this.conf, rootDir, logName); +WAL wal = createWAL(walFs, walRootDir, logName); HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal); -final long timestamp = this.ee.currentTime(); -// Write data and flush -for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { - region.put(new Put(ROW).addColumn(cfd.getName(), Bytes.toBytes("x"), timestamp, VALUE1)); -} -region.flush(true); +writeData(td, region); -// Now assert edits made it in. 
-Result result1 = region.get(new Get(ROW)); -assertEquals(td.getColumnFamilies().length, result1.size()); +// Now close the region without flush +region.close(true); +wal.shutdown(); +// split the log +WALSplitter.split(walRootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals); + +WAL wal2 = createWAL(walFs, walRootDir, logName); +HRegion region2 = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal2); +Result result2 = region2.get(new Get(ROW)); +assertEquals(td.getColumnFamilies().length, result2.size()); for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) { - assertTrue(Bytes.equals(VALUE1, result1.getValue(cfd.getName(), Bytes.toBytes("x"; + assertTrue(Bytes.equals(VALUE1, result2.getValue(cfd.getName(), QUALIFIER))); } + } -// Now close the region + @Test + public void testCorruptRecoveredHFile() throws Exception { +Pair pair = setupTableAndRegion(); +TableDescriptor td = pair.getFirst(); +RegionInfo ri = pair.getSecond(); + +WAL wal = createWAL(this.conf, rootDir, logName); +HRegion region = HRegion.
[hbase] branch branch-2 updated: HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overall balanced (#1324)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2 by this push: new 244b308 HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overallbalanced (#1324) 244b308 is described below commit 244b308a3e81bff9ac53f63116dee4593355619c Author: niuyulin AuthorDate: Wed Mar 25 11:27:32 2020 +0800 HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overallbalanced (#1324) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/rsgroup/RSGroupAdminServer.java | 13 +- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java| 174 - .../balancer/RSGroupableBalancerTestBase.java | 2 +- .../balancer/TestRSGroupBasedLoadBalancer.java | 25 ++- ...lancerWithStochasticLoadBalancerAsInternal.java | 9 +- .../hbase/favored/FavoredNodeLoadBalancer.java | 47 +++--- .../org/apache/hadoop/hbase/master/HMaster.java| 14 +- .../apache/hadoop/hbase/master/LoadBalancer.java | 32 ++-- .../hbase/master/assignment/RegionStates.java | 66 +++- .../hbase/master/balancer/BaseLoadBalancer.java| 44 +- .../master/balancer/FavoredStochasticBalancer.java | 11 +- .../hbase/master/balancer/SimpleLoadBalancer.java | 59 --- .../master/balancer/StochasticLoadBalancer.java| 33 ++-- .../hbase/TestStochasticBalancerJmxMetrics.java| 8 +- .../apache/hadoop/hbase/master/TestBalancer.java | 22 +-- .../hbase/master/balancer/BalancerTestBase.java| 7 +- .../LoadBalancerPerformanceEvaluation.java | 3 +- .../master/balancer/TestBaseLoadBalancer.java | 8 +- ...adBalancer.java => TestSimpleLoadBalancer.java} | 69 .../balancer/TestStochasticLoadBalancer.java | 11 +- .../TestStochasticLoadBalancerBalanceCluster.java | 9 +- ...estStochasticLoadBalancerHeterogeneousCost.java | 9 +- .../TestStochasticLoadBalancerRegionReplica.java | 7 +- 23 files changed, 359 insertions(+), 323 deletions(-) diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index 502d92b..9f038ff 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -453,16 +453,9 @@ public class RSGroupAdminServer implements RSGroupAdmin { } //We balance per group instead of per table - List plans = new ArrayList<>(); - for(Map.Entry>> tableMap: - getRSGroupAssignmentsByTable(groupName).entrySet()) { -LOG.info("Creating partial plan for table {} : {}", tableMap.getKey(), tableMap.getValue()); -List partialPlans = balancer.balanceCluster(tableMap.getValue()); -LOG.info("Partial plan for table {} : {}", tableMap.getKey(), partialPlans); -if (partialPlans != null) { - plans.addAll(partialPlans); -} - } + Map>> assignmentsByTable = +getRSGroupAssignmentsByTable(groupName); + List plans = balancer.balanceCluster(assignmentsByTable); boolean balancerRan = !plans.isEmpty(); if (balancerRan) { LOG.info("RSGroup balance {} starting with plan count: {}", groupName, plans.size()); diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index adb95ea..5e7061a8a 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -22,8 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -32,7 +30,6 @@ import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -42,6 +39,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger
[hbase] branch branch-2.3 updated: HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overall balanced (#1324)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2.3 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.3 by this push: new 0c545cf HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overallbalanced (#1324) 0c545cf is described below commit 0c545cf4d47e1951e524b358d148b8bf704cd262 Author: niuyulin AuthorDate: Wed Mar 25 11:27:32 2020 +0800 HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overallbalanced (#1324) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/rsgroup/RSGroupAdminServer.java | 13 +- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java| 174 - .../balancer/RSGroupableBalancerTestBase.java | 2 +- .../balancer/TestRSGroupBasedLoadBalancer.java | 25 ++- ...lancerWithStochasticLoadBalancerAsInternal.java | 9 +- .../hbase/favored/FavoredNodeLoadBalancer.java | 47 +++--- .../org/apache/hadoop/hbase/master/HMaster.java| 14 +- .../apache/hadoop/hbase/master/LoadBalancer.java | 32 ++-- .../hbase/master/assignment/RegionStates.java | 66 +++- .../hbase/master/balancer/BaseLoadBalancer.java| 44 +- .../master/balancer/FavoredStochasticBalancer.java | 11 +- .../hbase/master/balancer/SimpleLoadBalancer.java | 59 --- .../master/balancer/StochasticLoadBalancer.java| 33 ++-- .../hbase/TestStochasticBalancerJmxMetrics.java| 8 +- .../apache/hadoop/hbase/master/TestBalancer.java | 22 +-- .../hbase/master/balancer/BalancerTestBase.java| 7 +- .../LoadBalancerPerformanceEvaluation.java | 3 +- .../master/balancer/TestBaseLoadBalancer.java | 8 +- ...adBalancer.java => TestSimpleLoadBalancer.java} | 69 .../balancer/TestStochasticLoadBalancer.java | 11 +- .../TestStochasticLoadBalancerBalanceCluster.java | 9 +- ...estStochasticLoadBalancerHeterogeneousCost.java | 9 +- .../TestStochasticLoadBalancerRegionReplica.java | 7 +- 23 files changed, 359 insertions(+), 323 deletions(-) diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index 502d92b..9f038ff 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -453,16 +453,9 @@ public class RSGroupAdminServer implements RSGroupAdmin { } //We balance per group instead of per table - List plans = new ArrayList<>(); - for(Map.Entry>> tableMap: - getRSGroupAssignmentsByTable(groupName).entrySet()) { -LOG.info("Creating partial plan for table {} : {}", tableMap.getKey(), tableMap.getValue()); -List partialPlans = balancer.balanceCluster(tableMap.getValue()); -LOG.info("Partial plan for table {} : {}", tableMap.getKey(), partialPlans); -if (partialPlans != null) { - plans.addAll(partialPlans); -} - } + Map>> assignmentsByTable = +getRSGroupAssignmentsByTable(groupName); + List plans = balancer.balanceCluster(assignmentsByTable); boolean balancerRan = !plans.isEmpty(); if (balancerRan) { LOG.info("RSGroup balance {} starting with plan count: {}", groupName, plans.size()); diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index adb95ea..5e7061a8a 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -22,8 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -32,7 +30,6 @@ import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -42,6 +39,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Lo
[hbase] branch branch-2.2 updated: HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overall balanced (#1324)
This is an automated email from the ASF dual-hosted git repository. zghao pushed a commit to branch branch-2.2 in repository https://gitbox.apache.org/repos/asf/hbase.git The following commit(s) were added to refs/heads/branch-2.2 by this push: new 0b96d85 HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overallbalanced (#1324) 0b96d85 is described below commit 0b96d852e00196c0851bbcd82a972c15256535ab Author: niuyulin AuthorDate: Wed Mar 25 11:27:32 2020 +0800 HBASE-23949 refactor loadBalancer implements for rsgroup balance by table to achieve overallbalanced (#1324) Signed-off-by: Guanghao Zhang --- .../hadoop/hbase/rsgroup/RSGroupAdminServer.java | 13 +- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java| 174 - .../balancer/RSGroupableBalancerTestBase.java | 2 +- .../balancer/TestRSGroupBasedLoadBalancer.java | 25 ++- ...lancerWithStochasticLoadBalancerAsInternal.java | 9 +- .../hbase/favored/FavoredNodeLoadBalancer.java | 47 +++--- .../org/apache/hadoop/hbase/master/HMaster.java| 14 +- .../apache/hadoop/hbase/master/LoadBalancer.java | 32 ++-- .../hbase/master/assignment/RegionStates.java | 66 +++- .../hbase/master/balancer/BaseLoadBalancer.java| 44 +- .../master/balancer/FavoredStochasticBalancer.java | 11 +- .../hbase/master/balancer/SimpleLoadBalancer.java | 59 --- .../master/balancer/StochasticLoadBalancer.java| 33 ++-- .../hbase/TestStochasticBalancerJmxMetrics.java| 8 +- .../apache/hadoop/hbase/master/TestBalancer.java | 22 +-- .../hbase/master/balancer/BalancerTestBase.java| 7 +- .../LoadBalancerPerformanceEvaluation.java | 3 +- .../master/balancer/TestBaseLoadBalancer.java | 8 +- ...adBalancer.java => TestSimpleLoadBalancer.java} | 71 + .../balancer/TestStochasticLoadBalancer.java | 11 +- .../TestStochasticLoadBalancerBalanceCluster.java | 9 +- .../TestStochasticLoadBalancerRegionReplica.java | 7 +- 22 files changed, 353 insertions(+), 322 deletions(-) diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index a671d56..9f563a5 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -435,16 +435,9 @@ public class RSGroupAdminServer implements RSGroupAdmin { } //We balance per group instead of per table - List plans = new ArrayList<>(); - for(Map.Entry>> tableMap: - getRSGroupAssignmentsByTable(groupName).entrySet()) { -LOG.info("Creating partial plan for table {} : {}", tableMap.getKey(), tableMap.getValue()); -List partialPlans = balancer.balanceCluster(tableMap.getValue()); -LOG.info("Partial plan for table {} : {}", tableMap.getKey(), partialPlans); -if (partialPlans != null) { - plans.addAll(partialPlans); -} - } + Map>> assignmentsByTable = +getRSGroupAssignmentsByTable(groupName); + List plans = balancer.balanceCluster(assignmentsByTable); boolean balancerRan = !plans.isEmpty(); if (balancerRan) { LOG.info("RSGroup balance {} starting with plan count: {}", groupName, plans.size()); diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index adb95ea..5e7061a8a 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -22,8 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -32,7 +30,6 @@ import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -42,6 +39,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -110,50 +108,45 @@ public class RSGroupBasedLoadBal