[hadoop] branch branch-3.3 updated: HADOOP-17891. Exclude snappy-java and lz4-java from relocation in shaded hadoop client libraries (#3385)

2021-09-14 Thread sunchao
This is an automated email from the ASF dual-hosted git repository.

sunchao pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new b9715c2  HADOOP-17891. Exclude snappy-java and lz4-java from relocation in shaded hadoop client libraries (#3385)
b9715c2 is described below

commit b9715c29317d0ab5f32dfce11d1d17a6b3211a7f
Author: Liang-Chi Hsieh 
AuthorDate: Tue Sep 14 11:17:18 2021 -0700

HADOOP-17891. Exclude snappy-java and lz4-java from relocation in shaded hadoop client libraries (#3385)
---
 dev-support/bin/hadoop.sh  |  11 +-
 hadoop-client-modules/hadoop-client-api/pom.xml|  17 +++
 .../hadoop-client-check-invariants/pom.xml |   2 +
 .../resources/ensure-jars-have-correct-contents.sh |   2 +
 .../hadoop-client-check-test-invariants/pom.xml|   2 +
 .../hadoop-client-integration-tests/pom.xml|   5 +
 .../apache/hadoop/example/ITUseHadoopCodecs.java   | 144 +
 .../hadoop-client-minicluster/pom.xml  |  14 ++
 .../hadoop-client-runtime/pom.xml  |  14 ++
 9 files changed, 209 insertions(+), 2 deletions(-)

diff --git a/dev-support/bin/hadoop.sh b/dev-support/bin/hadoop.sh
index beebea8..526b9d5 100755
--- a/dev-support/bin/hadoop.sh
+++ b/dev-support/bin/hadoop.sh
@@ -512,7 +512,7 @@ function shadedclient_initialize
   maven_add_install shadedclient
 }
 
-## @description build client facing shaded artifacts and test them
+## @description build client facing shaded and non-shaded artifacts and test them
 ## @audience private
 ## @stability evolving
 ## @param repostatus
@@ -545,13 +545,20 @@ function shadedclient_rebuild
 return 0
   fi
 
-  big_console_header "Checking client artifacts on ${repostatus}"
+  big_console_header "Checking client artifacts on ${repostatus} with shaded clients"
 
   echo_and_redirect "${logfile}" \
 "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
   "${modules[@]}" \
   -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
 
+  big_console_header "Checking client artifacts on ${repostatus} with non-shaded clients"
+
+  echo_and_redirect "${logfile}" \
+"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
+  "${modules[@]}" \
+  -Pnoshade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
+
   count=$("${GREP}" -c '\[ERROR\]' "${logfile}")
   if [[ ${count} -gt 0 ]]; then
 add_vote_table -1 shadedclient "${repostatus} has errors when building and testing our client artifacts."
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index 9a40aa3..55e6cbe 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -67,6 +67,13 @@
 
   
 
+
+
+  org.xerial.snappy
+  snappy-java
+
   
   
 
@@ -109,6 +116,10 @@
 
   org.apache.hadoop:*
 
+
+  
+  org.xerial.snappy:*
+
   
   
 
@@ -149,6 +160,9 @@
 org/xml/sax/**/*
 org/bouncycastle/*
 org/bouncycastle/**/*
+
+org/xerial/snappy/*
+org/xerial/snappy/**/*
   
 
 
@@ -225,6 +239,9 @@
 
 net/topology/*
 net/topology/**/*
+
+net/jpountz/*
+net/jpountz/**/*
   
 
 
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index 4bd256d..187a1a3 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -92,6 +92,8 @@
 com.google.code.findbugs:jsr305
 
 org.bouncycastle:*
+
+org.xerial.snappy:*
   
 
 
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hadoop-client-modules/hadoop-client-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
index 7242ade..2e92740 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
+++ b/hadoop-client-modules/hadoop-client-check-invariants/sr

[hadoop] branch trunk updated: HADOOP-17891. Exclude snappy-java and lz4-java from relocation in shaded hadoop client libraries (#3385)

2021-09-14 Thread sunchao
This is an automated email from the ASF dual-hosted git repository.

sunchao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b8f7c75  HADOOP-17891. Exclude snappy-java and lz4-java from relocation in shaded hadoop client libraries (#3385)
b8f7c75 is described below

commit b8f7c7527a7c33c204315a6ea615b4d3fd237744
Author: Liang-Chi Hsieh 
AuthorDate: Tue Sep 14 11:17:18 2021 -0700

HADOOP-17891. Exclude snappy-java and lz4-java from relocation in shaded hadoop client libraries (#3385)
---
 dev-support/bin/hadoop.sh  |  11 +-
 hadoop-client-modules/hadoop-client-api/pom.xml|  17 +++
 .../hadoop-client-check-invariants/pom.xml |   2 +
 .../resources/ensure-jars-have-correct-contents.sh |   2 +
 .../hadoop-client-check-test-invariants/pom.xml|   2 +
 .../hadoop-client-integration-tests/pom.xml|   5 +
 .../apache/hadoop/example/ITUseHadoopCodecs.java   | 144 +
 .../hadoop-client-minicluster/pom.xml  |  14 ++
 .../hadoop-client-runtime/pom.xml  |  14 ++
 9 files changed, 209 insertions(+), 2 deletions(-)

diff --git a/dev-support/bin/hadoop.sh b/dev-support/bin/hadoop.sh
index 28d3ad2..e055519 100755
--- a/dev-support/bin/hadoop.sh
+++ b/dev-support/bin/hadoop.sh
@@ -513,7 +513,7 @@ function shadedclient_initialize
   maven_add_install shadedclient
 }
 
-## @description build client facing shaded artifacts and test them
+## @description build client facing shaded and non-shaded artifacts and test them
 ## @audience private
 ## @stability evolving
 ## @param repostatus
@@ -546,13 +546,20 @@ function shadedclient_rebuild
 return 0
   fi
 
-  big_console_header "Checking client artifacts on ${repostatus}"
+  big_console_header "Checking client artifacts on ${repostatus} with shaded clients"
 
   echo_and_redirect "${logfile}" \
 "${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
   "${modules[@]}" \
   -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
 
+  big_console_header "Checking client artifacts on ${repostatus} with non-shaded clients"
+
+  echo_and_redirect "${logfile}" \
+"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
+  "${modules[@]}" \
+  -Pnoshade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
+
   count=$("${GREP}" -c '\[ERROR\]' "${logfile}")
   if [[ ${count} -gt 0 ]]; then
 add_vote_table -1 shadedclient "${repostatus} has errors when building and testing our client artifacts."
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index 1a83743..d0d62f5 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -67,6 +67,13 @@
 
   
 
+
+
+  org.xerial.snappy
+  snappy-java
+
   
   
 
@@ -109,6 +116,10 @@
 
   org.apache.hadoop:*
 
+
+  
+  org.xerial.snappy:*
+
   
   
 
@@ -147,6 +158,9 @@
 org/xml/sax/**/*
 org/bouncycastle/*
 org/bouncycastle/**/*
+
+org/xerial/snappy/*
+org/xerial/snappy/**/*
   
 
 
@@ -223,6 +237,9 @@
 
 net/topology/*
 net/topology/**/*
+
+net/jpountz/*
+net/jpountz/**/*
   
 
 
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index 6ae9900..9d4bce1 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -90,6 +90,8 @@
 com.google.code.findbugs:jsr305
 
 org.bouncycastle:*
+
+org.xerial.snappy:*
   
 
 
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hadoop-client-modules/hadoop-client-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
index 7242ade..2e92740 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
+++ b/hadoop-client-modules/hadoop-client-check-invariants/src/test/res

[hadoop] branch branch-3.2 updated: YARN-10870. Missing user filtering check -> yarn.webapp.filter-entity-list-by-user for RM Scheduler page. Contributed by Gergely Pollak

2021-09-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4066cb0  YARN-10870. Missing user filtering check -> yarn.webapp.filter-entity-list-by-user for RM Scheduler page. Contributed by Gergely Pollak
4066cb0 is described below

commit 4066cb0793b2aa49a585384dc798934ef50f8ba5
Author: Szilard Nemeth 
AuthorDate: Tue Sep 14 18:09:35 2021 +0200

YARN-10870. Missing user filtering check -> yarn.webapp.filter-entity-list-by-user for RM Scheduler page. Contributed by Gergely Pollak
---
 .../webapp/FairSchedulerAppsBlock.java | 70 +++---
 1 file changed, 62 insertions(+), 8 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
index 14ad277..0ba8f1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
@@ -23,18 +23,21 @@ import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
 
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
+import java.security.Principal;
+import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.text.StringEscapeUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
@@ -49,6 +52,8 @@ import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
 
+import javax.servlet.http.HttpServletRequest;
+
 /**
  * Shows application information specific to the fair
  * scheduler as part of the fair scheduler page.
@@ -58,10 +63,19 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
   final FairSchedulerInfo fsinfo;
   final Configuration conf;
   final ResourceManager rm;
+  final boolean filterAppsByUser;
+
   @Inject
   public FairSchedulerAppsBlock(ResourceManager rm, ViewContext ctx,
   Configuration conf) {
 super(ctx);
+this.conf = conf;
+this.rm = rm;
+
+this.filterAppsByUser  = conf.getBoolean(
+YarnConfiguration.FILTER_ENTITY_LIST_BY_USER,
+YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
+
 FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
 fsinfo = new FairSchedulerInfo(scheduler);
 apps = new ConcurrentHashMap();
@@ -70,13 +84,53 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
   if (!(RMAppState.NEW.equals(entry.getValue().getState())
   || RMAppState.NEW_SAVING.equals(entry.getValue().getState())
   || RMAppState.SUBMITTED.equals(entry.getValue().getState()))) {
-apps.put(entry.getKey(), entry.getValue());
+if (!filterAppsByUser || hasAccess(entry.getValue(),
+ctx.requestContext().getRequest())) {
+  apps.put(entry.getKey(), entry.getValue());
+}
   }
 }
-this.conf = conf;
-this.rm = rm;
   }
-  
+
+  private UserGroupInformation getCallerUserGroupInformation(
+  HttpServletRequest hsr, boolean usePrincipal) {
+
+String remoteUser = hsr.getRemoteUser();
+if (usePrincipal) {
+  Principal princ = hsr.getUserPrincipal();
+  remoteUser = princ == null ? null : princ.getName();
+}
+
+UserGroupInformation callerUGI = null;
+if (remoteUser != null) {
+  callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+}
+
+return callerUGI;
+  }
+
+  protected Boolean hasAccess(RMApp app, HttpServletRequest hsr) {
+// Check f

[hadoop] branch branch-3.3 updated: YARN-10870. Missing user filtering check -> yarn.webapp.filter-entity-list-by-user for RM Scheduler page. Contributed by Gergely Pollak

2021-09-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 6c68211  YARN-10870. Missing user filtering check -> yarn.webapp.filter-entity-list-by-user for RM Scheduler page. Contributed by Gergely Pollak
6c68211 is described below

commit 6c6821106221e95e08fbacefb4c4ef74dc56b8e7
Author: Szilard Nemeth 
AuthorDate: Tue Sep 14 18:08:34 2021 +0200

YARN-10870. Missing user filtering check -> yarn.webapp.filter-entity-list-by-user for RM Scheduler page. Contributed by Gergely Pollak
---
 .../webapp/FairSchedulerAppsBlock.java | 70 +++---
 1 file changed, 62 insertions(+), 8 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
index 14ad277..0ba8f1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
@@ -23,18 +23,21 @@ import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE;
 
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
+import java.security.Principal;
+import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.text.StringEscapeUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
@@ -49,6 +52,8 @@ import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
 
+import javax.servlet.http.HttpServletRequest;
+
 /**
  * Shows application information specific to the fair
  * scheduler as part of the fair scheduler page.
@@ -58,10 +63,19 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
   final FairSchedulerInfo fsinfo;
   final Configuration conf;
   final ResourceManager rm;
+  final boolean filterAppsByUser;
+
   @Inject
   public FairSchedulerAppsBlock(ResourceManager rm, ViewContext ctx,
   Configuration conf) {
 super(ctx);
+this.conf = conf;
+this.rm = rm;
+
+this.filterAppsByUser  = conf.getBoolean(
+YarnConfiguration.FILTER_ENTITY_LIST_BY_USER,
+YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
+
 FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
 fsinfo = new FairSchedulerInfo(scheduler);
 apps = new ConcurrentHashMap();
@@ -70,13 +84,53 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
   if (!(RMAppState.NEW.equals(entry.getValue().getState())
   || RMAppState.NEW_SAVING.equals(entry.getValue().getState())
   || RMAppState.SUBMITTED.equals(entry.getValue().getState()))) {
-apps.put(entry.getKey(), entry.getValue());
+if (!filterAppsByUser || hasAccess(entry.getValue(),
+ctx.requestContext().getRequest())) {
+  apps.put(entry.getKey(), entry.getValue());
+}
   }
 }
-this.conf = conf;
-this.rm = rm;
   }
-  
+
+  private UserGroupInformation getCallerUserGroupInformation(
+  HttpServletRequest hsr, boolean usePrincipal) {
+
+String remoteUser = hsr.getRemoteUser();
+if (usePrincipal) {
+  Principal princ = hsr.getUserPrincipal();
+  remoteUser = princ == null ? null : princ.getName();
+}
+
+UserGroupInformation callerUGI = null;
+if (remoteUser != null) {
+  callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+}
+
+return callerUGI;
+  }
+
+  protected Boolean hasAccess(RMApp app, HttpServletRequest hsr) {
+// Check f
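The filter above is gated on the existing yarn.webapp.filter-entity-list-by-user property (YarnConfiguration.FILTER_ENTITY_LIST_BY_USER in the diff). A hedged sketch of turning it on, assuming the default used in the diff (YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER) leaves filtering disabled:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class EnableWebappUserFilter {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // With this set, FairSchedulerAppsBlock only lists apps the remote
        // user can access (per the hasAccess check added above).
        conf.setBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, true);
        System.out.println(
            conf.getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false));
      }
    }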

[hadoop] branch trunk updated: YARN-10915. AbstractCSQueue: Simplify complex logic in methods: deriveCapacityFromAbsoluteConfigurations and updateEffectiveResources (#3418)

2021-09-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5dc2f7b  YARN-10915. AbstractCSQueue: Simplify complex logic in methods: deriveCapacityFromAbsoluteConfigurations and updateEffectiveResources (#3418)
5dc2f7b is described below

commit 5dc2f7b1376cdc392a5d204fb0ff3b9dec0ba36c
Author: Benjamin Teke 
AuthorDate: Tue Sep 14 18:05:40 2021 +0200

YARN-10915. AbstractCSQueue: Simplify complex logic in methods: deriveCapacityFromAbsoluteConfigurations and updateEffectiveResources (#3418)

Co-authored-by: Benjamin Teke 
---
 .../scheduler/capacity/AbstractCSQueue.java| 163 +++--
 .../scheduler/capacity/ParentQueue.java|   6 +-
 2 files changed, 91 insertions(+), 78 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index f97460c..75aebfe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -1516,8 +1516,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 parentQueueCapacities, queueCapacities.getExistingNodeLabels());
   }
 
-  private Resource getMinResourceNormalized(String name,
-  Map effectiveMinRatio, Resource minResource) {
+  private Resource createNormalizedMinResource(Resource minResource,
+  Map effectiveMinRatio) {
 Resource ret = Resource.newInstance(minResource);
 int maxLength = ResourceUtils.getNumberOfCountableResourceTypes();
 for (int i = 0; i < maxLength; i++) {
@@ -1527,18 +1527,35 @@ public abstract class AbstractCSQueue implements CSQueue {
   Float ratio = effectiveMinRatio.get(nResourceInformation.getName());
   if (ratio != null) {
 ret.setResourceValue(i,
-(long) (nResourceInformation.getValue() * ratio.floatValue()));
+(long) (nResourceInformation.getValue() * ratio));
 if (LOG.isDebugEnabled()) {
-  LOG.debug("Updating min resource for Queue: " + name + " as " + ret
+  LOG.debug("Updating min resource for Queue: " + queuePath + " as " + ret
   .getResourceInformation(i) + ", Actual resource: "
-  + nResourceInformation.getValue() + ", ratio: " + ratio
-  .floatValue());
+  + nResourceInformation.getValue() + ", ratio: " + ratio);
 }
   }
 }
 return ret;
   }
 
+  private Resource getOrInheritMaxResource(Resource resourceByLabel, String label) {
+Resource parentMaxResource =
+parent.getQueueResourceQuotas().getConfiguredMaxResource(label);
+if (parentMaxResource.equals(Resources.none())) {
+  parentMaxResource =
+  parent.getQueueResourceQuotas().getEffectiveMaxResource(label);
+}
+
+Resource configuredMaxResource =
+getQueueResourceQuotas().getConfiguredMaxResource(label);
+if (configuredMaxResource.equals(Resources.none())) {
+  return Resources.clone(parentMaxResource);
+}
+
+return Resources.clone(Resources.min(resourceCalculator, resourceByLabel,
+configuredMaxResource, parentMaxResource));
+  }
+
   void updateMaxAppRelatedField(CapacitySchedulerConfiguration conf,
   LeafQueue leafQueue) {
 int maxApplications = conf.getMaximumApplicationsPerQueue(queuePath);
@@ -1586,99 +1603,97 @@ public abstract class AbstractCSQueue implements CSQueue {
 .getMaximumCapacity(maxLabel));
   }
 
-  private void deriveCapacityFromAbsoluteConfigurations(String label,
-  Resource clusterResource, ResourceCalculator rc) {
-
-/*
- * In case when queues are configured with absolute resources, it is better
- * to update capacity/max-capacity etc w.r.t absolute resource as well. In
- * case of computation, these values wont be used any more. However for
- * metrics and UI, its better these values are pre-computed here itself.
- */
-
-// 1. Update capacity as a float based on parent's minResource
-float f = rc.divide(clusterResource,
+  void deriveCapacityFromAbsoluteConfigurations(String label,
+  Resource clusterResource) {
+// Update capacity with a float calculated from the parent's minResources
+// and the recently changed queue minResources.
+// capacity = effect
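The normalization in createNormalizedMinResource is a per-resource scaling: each countable resource of the configured minResource is multiplied by the effective-min ratio registered under that resource's name. A worked sketch with hypothetical numbers (resource names and ratios are illustrative, not from the commit):

    public class NormalizeMinResourceSketch {
      public static void main(String[] args) {
        // normalized(i) = (long) (minResource(i) * effectiveMinRatio.get(name(i)))
        long memoryMb = 8192L; // hypothetical configured min memory-mb
        long vcores = 8L;      // hypothetical configured min vcores
        float ratio = 0.5f;    // hypothetical effective-min ratio for both
        System.out.println("memory-mb -> " + (long) (memoryMb * ratio)); // 4096
        System.out.println("vcores    -> " + (long) (vcores * ratio));   // 4
      }
    }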

[hadoop] branch trunk updated: YARN-10917. Investigate and simplify CapacitySchedulerConfigValidator#validateQueueHierarchy (#3403)

2021-09-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 783d94f  YARN-10917. Investigate and simplify CapacitySchedulerConfigValidator#validateQueueHierarchy (#3403)
783d94f is described below

commit 783d94f5cdf2f3b03aee5ae5a1bcd4cc14dcb292
Author: Tamas Domok 
AuthorDate: Tue Sep 14 17:54:25 2021 +0200

YARN-10917. Investigate and simplify CapacitySchedulerConfigValidator#validateQueueHierarchy (#3403)

* YARN-10917. Investigate and simplify CapacitySchedulerConfigValidator#validateQueueHierarchy.

Co-authored-by: Tamas Domok 
---
 .../capacity/CapacitySchedulerConfigValidator.java | 152 -
 1 file changed, 88 insertions(+), 64 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java
index ef9f97a..ca0d586 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfigValidator.java
@@ -106,12 +106,8 @@ public final class CapacitySchedulerConfigValidator {
 }
   }
 
-  private static boolean isDynamicQueue(CSQueue csQueue) {
-return ((AbstractCSQueue)csQueue).isDynamicQueue();
-  }
-
   /**
-   * Ensure all existing queues are present. Queues cannot be deleted if its not
+   * Ensure all existing queues are present. Queues cannot be deleted if it's not
 * in Stopped state, Queue's cannot be moved from one hierarchy to other also.
* Previous child queue could be converted into parent queue if it is in
* STOPPED state.
@@ -125,78 +121,106 @@ public final class CapacitySchedulerConfigValidator {
   CapacitySchedulerConfiguration newConf) throws IOException {
 // check that all static queues are included in the newQueues list
 for (CSQueue oldQueue : queues.getQueues()) {
-  if (!(AbstractAutoCreatedLeafQueue.class.isAssignableFrom(
-  oldQueue.getClass()))) {
-String queuePath = oldQueue.getQueuePath();
-CSQueue newQueue = newQueues.get(queuePath);
-String configPrefix = newConf.getQueuePrefix(
-oldQueue.getQueuePath());
-String state = newConf.get(configPrefix + "state");
-QueueState newQueueState = null;
-if (state != null) {
-  try {
-newQueueState = QueueState.valueOf(state);
-  } catch (Exception ex) {
-LOG.warn("Not a valid queue state for queue "
-+ oldQueue.getQueuePath());
+  if (AbstractAutoCreatedLeafQueue.class.isAssignableFrom(oldQueue.getClass())) {
+continue;
+  }
+
+  final String queuePath = oldQueue.getQueuePath();
+  final String configPrefix = CapacitySchedulerConfiguration.getQueuePrefix(
+  oldQueue.getQueuePath());
+  final QueueState newQueueState = createQueueState(newConf.get(configPrefix + "state"),
+  queuePath);
+  final CSQueue newQueue = newQueues.get(queuePath);
+
+  if (null == newQueue) {
+// old queue doesn't exist in the new XML
+if (isEitherQueueStopped(oldQueue.getState(), newQueueState)) {
+  LOG.info("Deleting Queue {}, as it is not present in the modified 
capacity " +
+  "configuration xml", queuePath);
+} else {
+  if (!isDynamicQueue(oldQueue)) {
+throw new IOException(oldQueue.getQueuePath() + " cannot be"
++ " deleted from the capacity scheduler configuration, as the"
++ " queue is not yet in stopped state. Current State : "
++ oldQueue.getState());
   }
 }
-if (null == newQueue) {
-  // old queue doesn't exist in the new XML
-  if (oldQueue.getState() == QueueState.STOPPED ||
-  newQueueState == QueueState.STOPPED) {
-LOG.info("Deleting Queue " + queuePath + ", as it is not"
-+ " present in the modified capacity configuration xml");
-  } else {
-if (!isDynamicQueue(oldQueue)) {
-  throw new IOException(oldQueue.getQueuePath() + " cannot be"
-  + " deleted from the capacity scheduler configuration, as 
the"
-  + " queue is not yet in stopped state. Cu

[hadoop] branch branch-3.2 updated: YARN-10901. Permission checking error on an existing directory in LogAggregationFileController#verifyAndCreateRemoteLogDir (#3410)

2021-09-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 478d2f2  YARN-10901. Permission checking error on an existing directory in LogAggregationFileController#verifyAndCreateRemoteLogDir (#3410)
478d2f2 is described below

commit 478d2f2897abba5bad2f5204b0d4ec02cbb7ba30
Author: Tamas Domok 
AuthorDate: Tue Sep 14 17:35:07 2021 +0200

YARN-10901. Permission checking error on an existing directory in LogAggregationFileController#verifyAndCreateRemoteLogDir (#3410)

Co-authored-by: Tamas Domok 
---
 .../LogAggregationFileController.java  | 13 +++-
 .../TestLogAggregationFileController.java  | 75 ++
 2 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index 35f2d45..697ecbc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.logaggregation.filecontroller;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang3.RandomStringUtils;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -368,10 +369,13 @@ public abstract class LogAggregationFileController {
 throw new YarnRuntimeException("Failed to create remoteLogDir ["
 + remoteRootLogDir + "]", e);
   }
-} else{
+} else {
   //Check if FS has capability to set/modify permissions
+  Path permissionCheckFile = new Path(qualified, String.format("%s.permission_check",
+  RandomStringUtils.randomAlphanumeric(8)));
   try {
-remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
+remoteFS.createNewFile(permissionCheckFile);
+remoteFS.setPermission(permissionCheckFile, new FsPermission(TLDIR_PERMISSIONS));
   } catch (UnsupportedOperationException use) {
 LOG.info("Unable to set permissions for configured filesystem since"
 + " it does not support this", remoteFS.getScheme());
@@ -379,6 +383,11 @@ public abstract class LogAggregationFileController {
   } catch (IOException e) {
 LOG.warn("Failed to check if FileSystem suppports permissions on "
 + "remoteLogDir [" + remoteRootLogDir + "]", e);
+  } finally {
+try {
+  remoteFS.delete(permissionCheckFile, false);
+} catch (IOException ignored) {
+}
   }
 }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileController.java
new file mode 100644
index 000..3566884
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileController.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.logaggregation.filecontroller;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.net.URI;
+
+import static org.apache.hadoop

[hadoop] branch branch-3.3 updated: YARN-10901. Permission checking error on an existing directory in LogAggregationFileController#verifyAndCreateRemoteLogDir (#3409)

2021-09-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 8e4ac01  YARN-10901. Permission checking error on an existing directory in LogAggregationFileController#verifyAndCreateRemoteLogDir (#3409)
8e4ac01 is described below

commit 8e4ac01135e386fb5ed09846c0f7bb9deffdadde
Author: Tamas Domok 
AuthorDate: Tue Sep 14 17:34:32 2021 +0200

YARN-10901. Permission checking error on an existing directory in LogAggregationFileController#verifyAndCreateRemoteLogDir (#3409)

Co-authored-by: Tamas Domok 
---
 .../LogAggregationFileController.java  | 13 +-
 .../TestLogAggregationFileController.java  | 53 ++
 2 files changed, 64 insertions(+), 2 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index cf305ba..4844ae2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.logaggregation.filecontroller;
 
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 
 import java.io.FileNotFoundException;
@@ -373,10 +374,13 @@ public abstract class LogAggregationFileController {
 throw new YarnRuntimeException("Failed to create remoteLogDir ["
 + remoteRootLogDir + "]", e);
   }
-} else{
+} else {
   //Check if FS has capability to set/modify permissions
+  Path permissionCheckFile = new Path(qualified, String.format("%s.permission_check",
+  RandomStringUtils.randomAlphanumeric(8)));
   try {
-remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
+remoteFS.createNewFile(permissionCheckFile);
+remoteFS.setPermission(permissionCheckFile, new FsPermission(TLDIR_PERMISSIONS));
   } catch (UnsupportedOperationException use) {
 LOG.info("Unable to set permissions for configured filesystem since"
 + " it does not support this", remoteFS.getScheme());
@@ -384,6 +388,11 @@ public abstract class LogAggregationFileController {
   } catch (IOException e) {
 LOG.warn("Failed to check if FileSystem suppports permissions on "
 + "remoteLogDir [" + remoteRootLogDir + "]", e);
+  } finally {
+try {
+  remoteFS.delete(permissionCheckFile, false);
+} catch (IOException ignored) {
+}
   }
 }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileController.java
index 5ade0fa..818e011 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileController.java
@@ -19,17 +19,23 @@
 package org.apache.hadoop.yarn.logaggregation.filecontroller;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
 import java.io.FileNotFoundException;
 import java.net.URI;
 
+import static org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController.TLDIR_PERMISSIONS;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.argThat;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
@@ -88,4 +94,51 @@ public class TestLogAggregationFileController {
 
 verify(fs).setOwner(any(), eq("yarn_user"), eq(testGroupName));
   }
+
+  private static class PathContainsString implements ArgumentMatcher {
+private
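The test's PathContainsString matcher is cut off above; a hedged sketch of a Mockito matcher of that shape, assuming it matches on the path's final name component (the committed body may differ):

    import org.apache.hadoop.fs.Path;
    import org.mockito.ArgumentMatcher;

    // Matches any Hadoop Path whose final component contains the given
    // substring, e.g. the random "<suffix>.permission_check" probe file
    // created by verifyAndCreateRemoteLogDir above.
    class PathContainsString implements ArgumentMatcher<Path> {
      private final String str;

      PathContainsString(String str) {
        this.str = str;
      }

      @Override
      public boolean matches(Path path) {
        return path.getName().contains(str);
      }
    }

With Mockito's argThat (imported by the test above), such a matcher would let the test verify that setPermission targeted the throwaway probe file rather than the log root.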

[hadoop] branch trunk updated (4f563ff -> 63c8922)

2021-09-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


 from 4f563ff  HDFS-16225. Fix typo for FederationTestUtils (#3428). Contributed by tomscut.
 add 63c8922  YARN-10912. AbstractCSQueue#updateConfigurableResourceRequirement: Separate validation logic from initialization logic (#3390)

No new revisions were added by this update.

Summary of changes:
 .../scheduler/capacity/AbstractCSQueue.java| 70 --
 1 file changed, 37 insertions(+), 33 deletions(-)




[hadoop] branch trunk updated: HDFS-16225. Fix typo for FederationTestUtils (#3428). Contributed by tomscut.

2021-09-14 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4f563ff  HDFS-16225. Fix typo for FederationTestUtils (#3428). Contributed by tomscut.
4f563ff is described below

commit 4f563ff1ba7ed680e7bfe45365469882df2fe629
Author: litao 
AuthorDate: Tue Sep 14 16:58:19 2021 +0800

HDFS-16225. Fix typo for FederationTestUtils (#3428). Contributed by tomscut.
---
 .../org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
index 2017a45..e758eee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
@@ -165,7 +165,7 @@ public final class FederationTestUtils {
* @param resolver Active namenode resolver.
* @param nsId Nameservice identifier.
* @param nnId Namenode identifier.
-   * @param finalState State to check for.
+   * @param state State to check for.
* @throws Exception Failed to verify State Store registration of namenode
*   nsId:nnId for state.
*/
@@ -357,7 +357,7 @@ public final class FederationTestUtils {
*
* @param stateManager number of routers to be registered.
* @param routerCount number of routers to be registered.
-   * @param tiemout max wait time in ms
+   * @param timeout max wait time in ms
*/
   public static void waitRouterRegistered(RouterStore stateManager,
   long routerCount, int timeout) throws Exception {




[hadoop] branch trunk updated: HDFS-15920.Solve the problem that the value of SafeModeMonitor#RECHECK_INTERVAL can be configured. (#2831). Contributed by JiangHua Zhu.

2021-09-14 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1032104  HDFS-15920.Solve the problem that the value of SafeModeMonitor#RECHECK_INTERVAL can be configured. (#2831). Contributed by JiangHua Zhu.
1032104 is described below

commit 1032104b9ac473b25d3ed7cc53dca7a8530d79e1
Author: jianghuazhu <740087...@qq.com>
AuthorDate: Tue Sep 14 16:54:36 2021 +0800

HDFS-15920.Solve the problem that the value of SafeModeMonitor#RECHECK_INTERVAL can be configured. (#2831). Contributed by JiangHua Zhu.

Signed-off-by: Ayush Saxena 
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 
 .../blockmanagement/BlockManagerSafeMode.java  | 22 ++
 .../src/main/resources/hdfs-default.xml|  8 
 .../blockmanagement/TestBlockManagerSafeMode.java  | 12 
 4 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f6c54dd..942929a 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -207,6 +207,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.namenode.replqueue.threshold-pct";
  public static final String  DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY = "dfs.namenode.safemode.min.datanodes";
   public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
+  public static final String  DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY =
+  "dfs.namenode.safemode.recheck.interval";
+  public static final long    DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_DEFAULT =
+  1000;
   public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY =
   HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
   public static final String  DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
index d731143..0bc6613 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
@@ -109,7 +109,7 @@ class BlockManagerSafeMode {
   /** Timestamp of the safe mode initialized. */
   private long startTime;
   /** the safe mode monitor thread. */
-  private final Daemon smmthread = new Daemon(new SafeModeMonitor());
+  private final Daemon smmthread;
 
   /** time of the last status printout */
   private long lastStatusReport;
@@ -156,6 +156,7 @@ class BlockManagerSafeMode {
 MILLISECONDS);
 
 this.inRollBack = isInRollBackMode(NameNode.getStartupOption(conf));
+this.smmthread = new Daemon(new SafeModeMonitor(conf));
 
 LOG.info("{} = {}", DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, threshold);
 LOG.info("{} = {}", DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
@@ -638,9 +639,22 @@ class BlockManagerSafeMode {
* Periodically check whether it is time to leave safe mode.
* This thread starts when the threshold level is reached.
*/
-  private class SafeModeMonitor implements Runnable {
+  final private class SafeModeMonitor implements Runnable {
 /** Interval in msec for checking safe mode. */
-private static final long RECHECK_INTERVAL = 1000;
+private long recheckInterval;
+
+private SafeModeMonitor(Configuration conf) {
+  recheckInterval = conf.getLong(
+  DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY,
+  DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_DEFAULT);
+  if (recheckInterval < 1) {
+LOG.warn("Invalid value for " +
+DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_DEFAULT +
+".Should be greater than 0, but is {}", recheckInterval);
+recheckInterval = DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_DEFAULT;
+  }
+  LOG.info("Using {} as SafeModeMonitor Interval", recheckInterval);
+}
 
 @Override
 public void run() {
@@ -660,7 +674,7 @@ class BlockManagerSafeMode {
 }
 
 try {
-  Thread.sleep(RECHECK_INTERVAL);
+  Thread.sleep(recheckInterval);
 } catch (InterruptedException ignored) {
 }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hd
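A hedged usage sketch for the new setting; the key and its 1000 ms default come from the DFSConfigKeys hunk above, and the 2000 ms value is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class SafeModeRecheckIntervalSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // dfs.namenode.safemode.recheck.interval; SafeModeMonitor falls back
        // to the default when the configured value is less than 1.
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 2000L);
        System.out.println(conf.getLong(
            DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_DEFAULT));
      }
    }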

[hadoop] branch branch-3.2 updated: HDFS-16198. Short circuit read leaks Slot objects when InvalidToken exception is thrown (#3359)

2021-09-14 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4eeb4d3  HDFS-16198. Short circuit read leaks Slot objects when InvalidToken exception is thrown (#3359)
4eeb4d3 is described below

commit 4eeb4d32e5c8ab74fea861a5ea5926aec361da0c
Author: EungsopYoo 
AuthorDate: Tue Sep 14 14:18:15 2021 +0900

HDFS-16198. Short circuit read leaks Slot objects when InvalidToken exception is thrown (#3359)

Reviewed-by: He Xiaoqiao 
Reviewed-by: Wei-Chiu Chuang 
(cherry picked from commit c4c5883d8bf1fdc330e1da4d93eba760fa70c0e8)
(cherry picked from commit 51a4a23e373abe9b2f6469ffcd5bc8294307f025)
---
 .../hdfs/client/impl/BlockReaderFactory.java   |   3 +
 .../TestBlockTokenWithShortCircuitRead.java| 203 +
 2 files changed, 206 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 73d9e5c1..8d9b30d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -642,6 +642,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   "attempting to set up short-circuit access to " +
   fileName + resp.getMessage();
   LOG.debug("{}:{}", this, msg);
+  if (slot != null) {
+cache.freeSlot(slot);
+  }
   return new ShortCircuitReplicaInfo(new InvalidToken(msg));
 default:
   final long expiration =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java
new file mode 100644
index 000..0a0fb11
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.shortcircuit.DfsClientShm;
+import 
org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
+import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
+import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.event.Level;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.apache.hadoop.fs.CommonConfigu

[hadoop] branch branch-3.3 updated: HDFS-16198. Short circuit read leaks Slot objects when InvalidToken exception is thrown (#3359)

2021-09-14 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 51a4a23  HDFS-16198. Short circuit read leaks Slot objects when InvalidToken exception is thrown (#3359)
51a4a23 is described below

commit 51a4a23e373abe9b2f6469ffcd5bc8294307f025
Author: EungsopYoo 
AuthorDate: Tue Sep 14 14:18:15 2021 +0900

HDFS-16198. Short circuit read leaks Slot objects when InvalidToken exception is thrown (#3359)

Reviewed-by: He Xiaoqiao 
Reviewed-by: Wei-Chiu Chuang 
(cherry picked from commit c4c5883d8bf1fdc330e1da4d93eba760fa70c0e8)
---
 .../hdfs/client/impl/BlockReaderFactory.java   |   3 +
 .../TestBlockTokenWithShortCircuitRead.java| 203 +
 2 files changed, 206 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index f9fd2b1..67c59cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -645,6 +645,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   "attempting to set up short-circuit access to " +
   fileName + resp.getMessage();
   LOG.debug("{}:{}", this, msg);
+  if (slot != null) {
+cache.freeSlot(slot);
+  }
   return new ShortCircuitReplicaInfo(new InvalidToken(msg));
 default:
   final long expiration =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java
new file mode 100644
index 000..0a0fb11
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithShortCircuitRead.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.shortcircuit.DfsClientShm;
+import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
+import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
+import org.apache.hadoop.net.unix.DomainSocket;
+import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.event.Level;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
+import static org.a