(hadoop) branch trunk updated: HDFS-17408: Reduce the number of quota calculations in FSDirRenameOp (#6653). Contributed by lei w.
This is an automated email from the ASF dual-hosted git repository.

zhangshuyan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

The following commit(s) were added to refs/heads/trunk by this push:
     new 36c22400b223 HDFS-17408: Reduce the number of quota calculations in FSDirRenameOp (#6653). Contributed by lei w.

36c22400b223 is described below

commit 36c22400b22301c0cf35fd68ae18f2084bb0f25f
Author: Lei313 <47049042+thinker...@users.noreply.github.com>
AuthorDate: Tue Apr 2 10:40:28 2024 +0800

    HDFS-17408: Reduce the number of quota calculations in FSDirRenameOp (#6653). Contributed by lei w.

    Reviewed-by: He Xiaoqiao
    Reviewed-by: Dinesh Chitlangia
    Signed-off-by: Shuyan Zhang
---
 .../hadoop/hdfs/server/namenode/FSDirMkdirOp.java  |   6 +-
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java | 100 ++---
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  28 ++--
 .../TestCorrectnessOfQuotaAfterRenameOp.java       | 161 +
 .../namenode/snapshot/TestRenameWithSnapshots.java |   2 +-
 5 files changed, 261 insertions(+), 36 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 862880d95b2d..0d7f3b202a0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.security.AccessControlException;

 import java.io.IOException;
 import java.util.List;
+import java.util.Optional;
+
 import static org.apache.hadoop.util.Time.now;

 class FSDirMkdirOp {
@@ -221,8 +223,8 @@ class FSDirMkdirOp {
     final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
         timestamp);
-    INodesInPath iip =
-        fsd.addLastINode(parent, dir, permission.getPermission(), true);
+    INodesInPath iip = fsd.addLastINode(parent, dir, permission.getPermission(),
+        true, Optional.empty());
     if (iip != null && aclEntries != null) {
       AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
     }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 64bc46d90162..0f6ceae82489 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
@@ -43,6 +45,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Optional;
+
 import static org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -68,14 +72,18 @@ class FSDirRenameOp {
    * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
    * dstInodes[dstInodes.length-1]
    */
-  private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
-      INodesInPath dst) throws QuotaExceededException {
+  private static Pair<Optional<QuotaCounts>, Optional<QuotaCounts>> verifyQuotaForRename(
+      FSDirectory fsd, INodesInPath src, INodesInPath dst) throws QuotaExceededException {
+    Optional<QuotaCounts> srcDelta = Optional.empty();
+    Optional<QuotaCounts> dstDelta = Optional.empty();
     if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
       // Do not check quota if edits log is still being processed
-      return;
+      return Pair.of(srcDelta, dstDelta);
     }
     int i = 0;
-    while(src.getINode(i) == dst.getINode(i)) { i++; }
+    while (src.getINode(i) == dst.getINode(i)) {
+      i++;
+    }
     // src[i - 1] is the last common ancestor.
     BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
     // Assume dstParent existence check done by callers.
@@ -88,13 +96,19 @@ class FSDirRenameOp {
     final QuotaCounts delta = src.getLastINode()
         .computeQuotaUsage(bsps, storagePolicyID, false,
             Snapshot.CURRENT_STATE_ID);
+    QuotaCounts srcQuota = new QuotaCounts.Builder().quotaCount(delta).build();
+    srcDelta = Optional.of(srcQuota);

     // Reduce the required quota by dst that is being removed
     final INode dstINode = ds
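The heart of HDFS-17408 is visible in the hunk above: verifyQuotaForRename now returns the QuotaCounts it computed, wrapped in an Optional per side, so the later tree-mutation steps can reuse the delta instead of walking the renamed subtree a second time. Below is a minimal, self-contained Java sketch of that compute-once, pass-along pattern; QuotaDelta, verifyQuotaForRename, and addLastINode here are illustrative stand-ins, not Hadoop's actual API.

import java.util.Optional;

/**
 * Sketch (not Hadoop code) of the HDFS-17408 pattern: the quota delta
 * computed while *verifying* a rename is cached in an Optional and handed
 * to the *update* step, so the renamed subtree is walked only once.
 */
public class QuotaReuseSketch {

  /** Stand-in for Hadoop's QuotaCounts (namespace + storagespace deltas). */
  static final class QuotaDelta {
    final long nsDelta;
    final long ssDelta;
    QuotaDelta(long nsDelta, long ssDelta) {
      this.nsDelta = nsDelta;
      this.ssDelta = ssDelta;
    }
  }

  /** The expensive recursive subtree walk the patch avoids repeating. */
  static QuotaDelta computeQuotaUsage() {
    System.out.println("expensive subtree walk");
    return new QuotaDelta(1, 1024);
  }

  /** Verification step: computes the delta once and returns it for reuse. */
  static Optional<QuotaDelta> verifyQuotaForRename() {
    QuotaDelta delta = computeQuotaUsage();
    // ...quota-limit checks against the destination ancestors go here...
    return Optional.of(delta);
  }

  /** Update step: reuses the cached delta, recomputing only if absent. */
  static void addLastINode(Optional<QuotaDelta> cached) {
    QuotaDelta delta = cached.orElseGet(QuotaReuseSketch::computeQuotaUsage);
    System.out.println("apply ns=" + delta.nsDelta + " ss=" + delta.ssDelta);
  }

  public static void main(String[] args) {
    Optional<QuotaDelta> delta = verifyQuotaForRename(); // one subtree walk
    addLastINode(delta);                                 // no second walk
  }
}

This is also why the FSDirMkdirOp call site in the diff now passes Optional.empty() to addLastINode: mkdir has no precomputed delta to hand over, so the quota update falls back to computing one itself.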
(hadoop) branch branch-3.3 updated: HADOOP-19115. Upgrade to nimbus-jose-jwt 9.37.2 due to CVE-2023-52428. (#6637) (#6689) Contributed by PJ Fanning.
This is an automated email from the ASF dual-hosted git repository.

slfan1989 pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 7cc64ce7a7c9 HADOOP-19115. Upgrade to nimbus-jose-jwt 9.37.2 due to CVE-2023-52428. (#6637) (#6689) Contributed by PJ Fanning.

7cc64ce7a7c9 is described below

commit 7cc64ce7a7c9c37cd9490fc9060e64d73e72b42b
Author: PJ Fanning
AuthorDate: Tue Apr 2 01:50:33 2024 +0200

    HADOOP-19115. Upgrade to nimbus-jose-jwt 9.37.2 due to CVE-2023-52428. (#6637) (#6689) Contributed by PJ Fanning.

    Signed-off-by: Shilun Fan
---
 LICENSE-binary         | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 31d744b19d5f..43866bce657f 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -242,7 +242,7 @@ com.google.guava:guava:jar:30.1.1-jre
 com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
 com.google.j2objc:j2objc-annotations:1.3
 com.microsoft.azure:azure-storage:7.0.1
-com.nimbusds:nimbus-jose-jwt:9.31
+com.nimbusds:nimbus-jose-jwt:9.37.2
 com.yammer.metrics:metrics-core:2.2.0
 com.zaxxer:HikariCP-java7:2.4.12
 commons-beanutils:commons-beanutils:1.9.4

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 5b910adfecb4..f9158e833fc3 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -217,7 +217,7 @@
     8.8.2
     1.1.3.Final
     5.4.0
-    <nimbus-jose-jwt.version>9.31</nimbus-jose-jwt.version>
+    <nimbus-jose-jwt.version>9.37.2</nimbus-jose-jwt.version>
     v12.22.1
     v1.22.5
     1.10.13
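The fix itself is a one-property version bump, but a downstream project that picks up the vulnerable 9.31 transitively can apply the same pin locally without waiting on a Hadoop release. A hypothetical consumer-side pom.xml fragment, mirroring the version this commit sets in hadoop-project/pom.xml:

<!-- Hypothetical downstream pom.xml fragment (not part of this commit):
     force the CVE-2023-52428-patched release over a transitive 9.31. -->
<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>com.nimbusds</groupId>
      <artifactId>nimbus-jose-jwt</artifactId>
      <version>9.37.2</version>
    </dependency>
  </dependencies>
</dependencyManagement>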
(hadoop) branch trunk updated: YARN-11663. [Federation] Add Cache Entity Nums Limit. (#6662) Contributed by Shilun Fan.
This is an automated email from the ASF dual-hosted git repository.

slfan1989 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

The following commit(s) were added to refs/heads/trunk by this push:
     new 5f3eb446f768 YARN-11663. [Federation] Add Cache Entity Nums Limit. (#6662) Contributed by Shilun Fan.

5f3eb446f768 is described below

commit 5f3eb446f76803ed654597bdf88c70268d8d9623
Author: slfan1989 <55643692+slfan1...@users.noreply.github.com>
AuthorDate: Tue Apr 2 07:47:59 2024 +0800

    YARN-11663. [Federation] Add Cache Entity Nums Limit. (#6662) Contributed by Shilun Fan.

    Reviewed-by: Dinesh Chitlangia
    Signed-off-by: Shilun Fan
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  4 ++
 .../src/main/resources/yarn-default.xml            |  9
 .../federation/cache/FederationGuavaCache.java     | 12 -
 .../server/federation/cache/FederationJCache.java  | 60 +++---
 .../utils/TestFederationStateStoreFacade.java      |  4 +-
 5 files changed, 56 insertions(+), 33 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 0ab4107c1320..650e82d67381 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -4031,6 +4031,10 @@ public class YarnConfiguration extends Configuration {
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;

+  public static final String FEDERATION_CACHE_ENTITY_NUMS =
+      FEDERATION_PREFIX + "cache-entity.nums";
+  public static final int DEFAULT_FEDERATION_CACHE_ENTITY_NUMS = 1000;
+
   public static final String FEDERATION_FLUSH_CACHE_FOR_RM_ADDR =
       FEDERATION_PREFIX + "flush-cache-for-rm-addr";
   public static final boolean DEFAULT_FEDERATION_FLUSH_CACHE_FOR_RM_ADDR = true;

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 41e38f601cbd..6b2d2cd817c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3787,6 +3787,15 @@
     <value>300</value>
   </property>

+  <property>
+    <description>
+      The number of entries in the Federation cache.
+      default is 1000.
+    </description>
+    <name>yarn.federation.cache-entity.nums</name>
+    <value>1000</value>
+  </property>
+
   <property>
     <description>The registry base directory for federation.</description>
     <name>yarn.federation.registry.base-dir</name>

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/cache/FederationGuavaCache.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/cache/FederationGuavaCache.java
index 5ab0ef77218d..2ba9e2869fe8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/cache/FederationGuavaCache.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/cache/FederationGuavaCache.java
@@ -27,15 +27,20 @@ import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import java.util.Map;
 import java.util.concurrent.TimeUnit;

 public class FederationGuavaCache extends FederationCache {

+  private static final Logger LOG = LoggerFactory.getLogger(FederationCache.class);
+
   private Cache> cache;

   private int cacheTimeToLive;
+  private long cacheEntityNums;

   private String className = this.getClass().getSimpleName();

@@ -52,6 +57,8 @@ public class FederationGuavaCache extends FederationCache {
     // no conflict or pick up a specific one in the future.
     cacheTimeToLive = pConf.getInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS,
         YarnConfiguration.DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS);
+    cacheEntityNums = pConf.getLong(YarnConfiguration.FEDERATION_CACHE_ENTITY_NUMS,
+        YarnConfiguration.DEFAULT_FEDERATION_CACHE_ENTITY_NUMS);
     if (cacheTimeToLive <= 0) {
       isCachingEnabled = false;
       return;
@@ -59,8 +66,11 @@ public class FederationGuavaCache extends FederationCache
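The functional change here is a size bound layered onto the cache's existing TTL eviction: the new cacheEntityNums value read in the hunk above is presumably wired into the cache builder in the truncated hunk that follows it. A minimal sketch of that combination with plain Guava is below; Hadoop itself uses the shaded copy under org.apache.hadoop.thirdparty and caches SubClusterId/SubClusterInfo records rather than strings, so the class name, key/value types, and literal defaults are illustrative only.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;

public class BoundedFederationCacheSketch {
  public static void main(String[] args) {
    // Stand-ins for the two configuration values: the existing
    // FEDERATION_CACHE_TIME_TO_LIVE_SECS (default 300) and the new
    // FEDERATION_CACHE_ENTITY_NUMS (default 1000).
    int cacheTimeToLive = 300;
    long cacheEntityNums = 1000L;

    // Before YARN-11663 entries were evicted only by TTL; maximumSize(...)
    // additionally caps how many entries the cache may hold at once,
    // evicting old entries once the bound is reached.
    Cache<String, String> cache = CacheBuilder.newBuilder()
        .expireAfterWrite(cacheTimeToLive, TimeUnit.SECONDS)
        .maximumSize(cacheEntityNums)
        .build();

    cache.put("subCluster1", "subClusterInfo-1");
    System.out.println(cache.getIfPresent("subCluster1")); // subClusterInfo-1
  }
}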
(hadoop) branch trunk updated: HADOOP-19077. Remove use of javax.ws.rs.core.HttpHeaders (#6554). Contributed by PJ Fanning
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

The following commit(s) were added to refs/heads/trunk by this push:
     new f7d1ec2d9e43 HADOOP-19077. Remove use of javax.ws.rs.core.HttpHeaders (#6554). Contributed by PJ Fanning

f7d1ec2d9e43 is described below

commit f7d1ec2d9e433b4073deac5d15a8234f0c6dd5e9
Author: PJ Fanning
AuthorDate: Mon Apr 1 09:13:39 2024 +0200

    HADOOP-19077. Remove use of javax.ws.rs.core.HttpHeaders (#6554). Contributed by PJ Fanning

    Signed-off-by: Ayush Saxena
---
 .../src/main/java/org/apache/hadoop/conf/ConfServlet.java             | 2 +-
 .../src/test/java/org/apache/hadoop/conf/TestConfServlet.java         | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java   | 2 +-
 .../hdfs/web/oauth2/ConfRefreshTokenBasedAccessTokenProvider.java     | 2 +-
 .../hadoop/hdfs/web/oauth2/CredentialBasedAccessTokenProvider.java    | 2 +-
 .../apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java    | 2 +-
 .../java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java  | 2 +-
 .../java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java  | 4 +---
 .../src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java  | 2 +-
 .../hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java      | 2 +-
 .../hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java  | 2 +-
 .../resourcemanager/webapp/TestRMWebServicesAppsModification.java     | 2 +-
 .../hadoop/yarn/server/router/webapp/FederationInterceptorREST.java   | 2 +-
 .../apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java | 2 +-
 .../yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java  | 2 +-
 15 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index b427038fdddc..feaf5fdfefce 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -24,13 +24,13 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.HttpHeaders;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.HttpServer2;

 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.thirdparty.com.google.common.net.HttpHeaders;

 /**
  * A servlet to print out the running configuration data.

diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
index 6db47d6d22fd..dfb1f5567c6f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
@@ -27,10 +27,10 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.ServletConfig;
 import javax.servlet.ServletContext;
-import javax.ws.rs.core.HttpHeaders;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;

+import org.apache.hadoop.thirdparty.com.google.common.net.HttpHeaders;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index a8bd95b32a25..d0607e96dc42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -52,7 +52,6 @@ import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;

-import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;

 import org.apache.commons.io.IOUtils;
@@ -128,6 +127,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.thirdparty.com.google.common.net.HttpHeaders;
 import org.apache.hadoop.util.JsonSerialization;
 import org.apache.hadoop.util.KMSUt
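The change repeated across all fifteen files is the same one-line import swap: header-name constants now come from the (shaded) Guava HttpHeaders class instead of the JAX-RS one, which works because both expose the same header strings, and it removes a compile-time dependency on javax.ws.rs. A small self-contained sketch using plain Guava follows; Hadoop itself uses the relocated org.apache.hadoop.thirdparty copy, and the URL and endpoint below are illustrative only.

import com.google.common.net.HttpHeaders;
import java.net.HttpURLConnection;
import java.net.URL;

public class HeaderConstantSketch {
  public static void main(String[] args) throws Exception {
    // Guava's constants carry the same header names the
    // javax.ws.rs.core.HttpHeaders constants did, so call sites only
    // need a different import.
    System.out.println(HttpHeaders.ACCEPT);       // Accept
    System.out.println(HttpHeaders.CONTENT_TYPE); // Content-Type

    // Hypothetical request against a NameNode /conf endpoint:
    URL url = new URL("http://localhost:9870/conf");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(HttpHeaders.ACCEPT, "application/json");
  }
}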