[hadoop] branch trunk updated: HADOOP-18193: Support nested mount points in INodeTree
This is an automated email from the ASF dual-hosted git repository. omalley pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 6a95c3a0390 HADOOP-18193:Support nested mount points in INodeTree 6a95c3a0390 is described below commit 6a95c3a0390d1af29d1d61998f8e72510b0c3cf8 Author: Lei Yang AuthorDate: Mon May 9 11:52:15 2022 -0700 HADOOP-18193:Support nested mount points in INodeTree Fixes #4181 Signed-off-by: Owen O'Malley --- .../org/apache/hadoop/fs/viewfs/ConfigUtil.java| 18 + .../org/apache/hadoop/fs/viewfs/Constants.java | 13 +- .../org/apache/hadoop/fs/viewfs/InodeTree.java | 180 -- .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 106 +++--- .../java/org/apache/hadoop/fs/viewfs/ViewFs.java | 160 + .../hadoop/fs/viewfs/TestNestedMountPoint.java | 365 + .../apache/hadoop/fs/viewfs/TestViewFsConfig.java | 1 + .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java | 142 8 files changed, 814 insertions(+), 171 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java index 09ec5d29330..ead2a365f3a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java @@ -247,4 +247,22 @@ public class ConfigUtil { return conf.get(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE_NAME_KEY, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE); } + + /** + * Check the bool config whether nested mount point is supported. 
Default: true + * @param conf - from this conf + * @return whether nested mount point is supported + */ + public static boolean isNestedMountPointSupported(final Configuration conf) { +return conf.getBoolean(Constants.CONFIG_NESTED_MOUNT_POINT_SUPPORTED, true); + } + + /** + * Set the bool value isNestedMountPointSupported in config. + * @param conf - from this conf + * @param isNestedMountPointSupported - whether nested mount point is supported + */ + public static void setIsNestedMountPointSupported(final Configuration conf, boolean isNestedMountPointSupported) { +conf.setBoolean(Constants.CONFIG_NESTED_MOUNT_POINT_SUPPORTED, isNestedMountPointSupported); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java index 21f4d99f891..806e69f32c6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java @@ -35,7 +35,7 @@ public interface Constants { * Prefix for the config variable for the ViewFs mount-table path. */ String CONFIG_VIEWFS_MOUNTTABLE_PATH = CONFIG_VIEWFS_PREFIX + ".path"; - + /** * Prefix for the home dir for the mount table - if not specified * then the hadoop default value (/user) is used. @@ -53,12 +53,17 @@ public interface Constants { */ public static final String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE = "default"; + /** + * Config to enable nested mount point in viewfs + */ + String CONFIG_NESTED_MOUNT_POINT_SUPPORTED = CONFIG_VIEWFS_PREFIX + ".nested.mount.point.supported"; + /** * Config variable full prefix for the default mount table. */ - public static final String CONFIG_VIEWFS_PREFIX_DEFAULT_MOUNT_TABLE = + public static final String CONFIG_VIEWFS_PREFIX_DEFAULT_MOUNT_TABLE = CONFIG_VIEWFS_PREFIX + "." 
+ CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; - + /** * Config variable for specifying a simple link */ @@ -82,7 +87,7 @@ public interface Constants { /** * Config variable for specifying a merge of the root of the mount-table - * with the root of another file system. + * with the root of another file system. */ String CONFIG_VIEWFS_LINK_MERGE_SLASH = "linkMergeSlash"; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index 23ad053a67d..a90084ad8f4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.fs.viewfs; +import java.util.Collection; +import java.util.Comparator; +import java.util.Set; +import java.util.TreeSet; import java.util.function.Function; import org.apache.hadoop.
[hadoop] branch trunk updated: HDFS-16465. Remove redundant strings.h inclusions (#4279)
This is an automated email from the ASF dual-hosted git repository. gaurava pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 1350539f2de HDFS-16465. Remove redundant strings.h inclusions (#4279) 1350539f2de is described below commit 1350539f2de158c08b47f336e8793e7aaecc8177 Author: Gautham B A AuthorDate: Wed May 11 23:04:22 2022 +0530 HDFS-16465. Remove redundant strings.h inclusions (#4279) * Remove redundant strings.h inclusions * strings.h was included in a bunch of C/C++ files and were redundant. * Also, strings.h is not available on Windows and thus isn't cross-platform compatible. * Build for all platforms in CI * Revert "Build for all platforms in CI" This reverts commit 2650f047bd6791a5908cfbe50cc8e70d42c512cb. * Debug failure on Centos 8 * Skipping pipeline run on Centos 7 to debug the failure on Centos 8. * Revert "Debug failure on Centos 8" This reverts commit e365e34d6fab9df88f4df622910ddb28a8c8796f. 
--- .../hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h| 1 - .../hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c | 1 - .../src/main/native/libhdfspp/lib/common/configuration.cc| 1 - .../src/main/native/libhdfspp/lib/common/configuration_loader.cc | 1 - 4 files changed, 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h index 4554dbdbea5..5ab8ac4131b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_dfs.h @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c index 02f1b5f282c..23a00a28898 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c @@ -21,7 +21,6 @@ #include #include #include -#include #include "fuse_context_handle.h" #include "fuse_dfs.h" diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc index 947214bdbd5..8219740ef05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration.cc @@ -35,7 +35,6 @@ #include "hdfspp/uri.h" #include "x-platform/syscall.h" -#include #include #include #include diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc index 5301137505a..7d88550a81f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/common/configuration_loader.cc @@ -21,7 +21,6 @@ #include "x-platform/syscall.h" #include -#include #include #include #include - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[hadoop] annotated tag release-3.3.3-RC1 created (now 4d532efce8d)
This is an automated email from the ASF dual-hosted git repository. stevel pushed a change to annotated tag release-3.3.3-RC1 in repository https://gitbox.apache.org/repos/asf/hadoop.git at 4d532efce8d (tag) tagging d37586cbda38c338d9fe481addda5a05fb516f71 (commit) replaces rel/release-3.3.2 by Steve Loughran on Wed May 11 17:53:14 2022 +0100 - Log - Release candidate -3.3.3-RC1 -BEGIN PGP SIGNATURE- iQIzBAABCgAdFiEEOCN+5CUFAoUHfbV60iz4RtuxYqAFAmJ76foACgkQ0iz4Rtux YqDXnA/8DdJJhk97s8QUCSG5vCrWzWniW+q+UdlwisEvxkBx1mmPxt0sb9uL2wja fhVhcm5DSQDmOcB1HJe93mF9+J+NXJ7x0hhqZrp9sxa7oezC7P1DT2syNEvjnHiq lhEV3CRNzqJ1KK/lTj8lcZGpmLJLaMdiMONr/79rJl+qQKczw90sApXC20zLu9eN 8ovL6EU186c/NP5kFS8F5c3ST/CJorZrghLqHSOl+Quq6UTdMhd3NCYUOl3MEy5w ms5RG2JSdzY+bCpOTACLIcnlijkkhXGZcjji0QXG3vDpub1JfQMeP8MUVu08p+g1 gM1d8Cq5QmyVs1dCHevcq2ADoq6Ine71csjUh091UdT47793fES3UM2RxPq3oi6S ucQ6TmkPZCSSVdPRt58vnYQ9OzOnd9vjDOGoBxS53KF4iKBmjsRLQX8iJlsOR8Kf WWPFqjscFF4L/gFGD2UO0SseX4ds0jqxkZykcEbBWKFcGrSDeLKdBRhZn/nIg7En FgUvKmPzLqjJqpxt3TdOoQcXf9a9Mpjyj8vjY/akE01zqZ9r53qfP7gh05cADn/K 5Cl3tgWSU7wlEBvkwd4bD7RFB6nzsaE1VIJnXtNj3d6QOEDcxRFOhGdHmRkoIP3d uCl9B9MIHBbUgO85H3CknWVHk4Orb5FxMG1gj/2x7v/Hgkt7ZH0= =4wmA -END PGP SIGNATURE- --- No new revisions were added by this update. - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[hadoop] branch trunk updated: YARN-11114. RMWebServices returns only apps matching exactly the submitted queue name. Contributed by Szilard Nemeth
This is an automated email from the ASF dual-hosted git repository. bteke pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new 9af3eabdca2 YARN-4. RMWebServices returns only apps matching exactly the submitted queue name. Contributed by Szilard Nemeth 9af3eabdca2 is described below commit 9af3eabdca2e246dbeb583cbc89d51e89ef11ecb Author: Szilard Nemeth AuthorDate: Wed Apr 20 19:39:47 2022 +0200 YARN-4. RMWebServices returns only apps matching exactly the submitted queue name. Contributed by Szilard Nemeth --- .../server/resourcemanager/ClientRMService.java| 29 +++- .../resourcemanager/TestClientRMService.java | 4 +- .../webapp/TestRMWebServicesApps.java | 150 - 3 files changed, 178 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 04efc886413..6c37b7e9c0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -37,7 +37,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; - import org.apache.commons.cli.UnrecognizedOptionException; import org.apache.commons.lang3.Range; import org.slf4j.Logger; @@ -913,7 +912,17 @@ public class ClientRMService extends AbstractService implements } if (queues != null && !queues.isEmpty()) { -if 
(!queues.contains(application.getQueue())) { +Map> foundApps = queryApplicationsByQueues(apps, queues); +List runningAppsByQueues = foundApps.entrySet().stream() +.filter(e -> queues.contains(e.getKey())) +.map(Map.Entry::getValue) +.flatMap(Collection::stream) +.collect(Collectors.toList()); +List runningAppsById = runningAppsByQueues.stream() +.filter(app -> app.getApplicationId().equals(application.getApplicationId())) +.collect(Collectors.toList()); + +if (runningAppsById.isEmpty() && !queues.contains(application.getQueue())) { continue; } } @@ -992,6 +1001,22 @@ public class ClientRMService extends AbstractService implements return response; } + private Map> queryApplicationsByQueues( + Map apps, Set queues) { +final Map> appsToQueues = new HashMap<>(); +for (String queue : queues) { + List appsInQueue = scheduler.getAppsInQueue(queue); + if (appsInQueue != null && !appsInQueue.isEmpty()) { +for (ApplicationAttemptId appAttemptId : appsInQueue) { + RMApp rmApp = apps.get(appAttemptId.getApplicationId()); + appsToQueues.putIfAbsent(queue, new ArrayList<>()); + appsToQueues.get(queue).add(rmApp); +} + } +} +return appsToQueues; + } + private Set getLowerCasedAppTypes(GetApplicationsRequest request) { Set applicationTypes = new HashSet<>(); if (request.getApplicationTypes() != null && !request.getApplicationTypes() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 7806845a2ed..9f4e9433b14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -1402,9 +1402,9 @@ public class TestClientRMService { request.setQueues(queueSet); queueSet.add(queues[0]); -assertEquals("Incorrect number of applications in queue", 2, +assertEquals("Incorrect number of applications in queue", 3, rmService.getApplications(request).getApplicationList().size()); -assertEquals("Incorrect number of applications in queue", 2, +assertEquals("Incorrect number of applications in queue", 3, rmService.getApplications(request).getApplicationList().size()); queueSet.add(queues[1]); diff --git a/h
[hadoop] branch trunk updated: MAPREDUCE-7379. RMContainerRequestor#makeRemoteRequest has confusing log message. Contributed by Ashutosh Gupta
This is an automated email from the ASF dual-hosted git repository. snemeth pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new f143e994285 MAPREDUCE-7379. RMContainerRequestor#makeRemoteRequest has confusing log message. Contributed by Ashutosh Gupta f143e994285 is described below commit f143e994285543e2f1a779274f826f38a78a27be Author: Szilard Nemeth AuthorDate: Wed May 11 16:55:19 2022 +0200 MAPREDUCE-7379. RMContainerRequestor#makeRemoteRequest has confusing log message. Contributed by Ashutosh Gupta --- .../hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java | 10 -- 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java index 61cc2eb898e..16ca585e9b4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java @@ -213,12 +213,10 @@ public abstract class RMContainerRequestor extends RMCommunicator { allocateResponse.getCompletedContainersStatuses().size(); if (ask.size() > 0 || release.size() > 0) { - LOG.info("getResources() for " + applicationId + ":" + " ask=" - + ask.size() + " release= " + release.size() + " newContainers=" - + allocateResponse.getAllocatedContainers().size() - + " finishedContainers=" + numCompletedContainers - + " resourcelimit=" + availableResources + " knownNMs=" - + clusterNmCount); + LOG.info("applicationId={}: ask={} release={} newContainers={} 
finishedContainers={}" + + " resourceLimit={} knownNMs={}", applicationId, ask.size(), release.size(), + allocateResponse.getAllocatedContainers().size(), numCompletedContainers, + availableResources, clusterNmCount); } ask.clear(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[hadoop] branch trunk updated: YARN-10850. TimelineService v2 lists containers for all attempts when filtering for one. Contributed by Benjamin Teke
This is an automated email from the ASF dual-hosted git repository. snemeth pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new eeedc0c7e41 YARN-10850. TimelineService v2 lists containers for all attempts when filtering for one. Contributed by Benjamin Teke eeedc0c7e41 is described below commit eeedc0c7e41c7932169af2b05556ca155c765b22 Author: Szilard Nemeth AuthorDate: Wed May 11 14:39:42 2022 +0200 YARN-10850. TimelineService v2 lists containers for all attempts when filtering for one. Contributed by Benjamin Teke --- .../hadoop/yarn/client/api/impl/AHSv2ClientImpl.java | 5 ++--- .../client/api/impl/TimelineReaderClientImpl.java| 16 ++-- .../api/impl/TestTimelineReaderClientImpl.java | 20 +++- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java index e3653baa2b7..26f45baaee0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java @@ -137,9 +137,8 @@ public class AHSv2ClientImpl extends AHSClient { ApplicationId appId = applicationAttemptId.getApplicationId(); ApplicationReport appReport = getApplicationReport(appId); Map filters = new HashMap<>(); -filters.put("infofilters", "SYSTEM_INFO_PARENT_ENTITY eq {\"id\":\"" + -applicationAttemptId.toString() + -"\",\"type\":\"YARN_APPLICATION_ATTEMPT\"}"); +filters.put("infofilters", "SYSTEM_INFO_PARENT_ENTITY eq " ++ "{\"type\":\"YARN_APPLICATION_ATTEMPT\",\"id\":\"" + applicationAttemptId + "\"}"); List entities = 
readerClient.getContainerEntities( appId, "ALL", filters, 0, null); List containers = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java index 29609b955fb..71bf13220b0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java @@ -38,7 +38,10 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MultivaluedMap; import java.io.IOException; +import java.io.UnsupportedEncodingException; import java.net.URI; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -208,12 +211,21 @@ public class TimelineReaderClientImpl extends TimelineReaderClient { return Arrays.asList(entity); } + @VisibleForTesting + protected String encodeValue(String value) throws UnsupportedEncodingException { +// Since URLEncoder doesn't use and doesn't have an option for percent-encoding +// (as specified in RFC 3986) the spaces are encoded to + signs, which need to be replaced +// manually +return URLEncoder.encode(value, StandardCharsets.UTF_8.toString()) +.replaceAll("\\+", "%20"); + } + private void mergeFilters(MultivaluedMap defaults, - Map filters) { +Map filters) throws UnsupportedEncodingException { if (filters != null && !filters.isEmpty()) { for (Map.Entry entry : filters.entrySet()) { if (!defaults.containsKey(entry.getKey())) { - defaults.add(entry.getKey(), filters.get(entry.getValue())); + defaults.add(entry.getKey(), encodeValue(entry.getValue())); } } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java index f668472256a..757aeb8c31d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java @@ -25,6 +25,7 @@ import static org.mockito.Mockito.when; import com.sun.jersey.api.client.ClientResponse; import org.apache.hadoop.conf.Config
[hadoop] branch trunk updated: YARN-11141. Capacity Scheduler does not support ambiguous queue names when moving application across queues. Contributed by Andras Gyori
This is an automated email from the ASF dual-hosted git repository. snemeth pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git The following commit(s) were added to refs/heads/trunk by this push: new b62d6ce6fd6 YARN-11141. Capacity Scheduler does not support ambiguous queue names when moving application across queues. Contributed by Andras Gyori b62d6ce6fd6 is described below commit b62d6ce6fd6cd568e9b9a8729a03c7f773875fca Author: Szilard Nemeth AuthorDate: Wed May 11 14:27:34 2022 +0200 YARN-11141. Capacity Scheduler does not support ambiguous queue names when moving application across queues. Contributed by Andras Gyori --- .../scheduler/capacity/CapacityScheduler.java | 7 +++- .../capacity/CapacitySchedulerQueueHelpers.java| 30 .../capacity/CapacitySchedulerTestUtilities.java | 11 ++ .../capacity/TestCapacitySchedulerApps.java| 41 ++ 4 files changed, 88 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 44e80a6c234..cf5034ba228 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -2678,7 +2678,12 @@ public class CapacityScheduler extends if (application == null) { throw new YarnException("App to be moved " + appId + " not found."); } - String sourceQueueName = application.getQueue().getQueueName(); + if 
(!(application.getQueue() instanceof CSQueue)) { +throw new YarnException("Source queue is not a Capacity Scheduler queue"); + } + + CSQueue csQueue = (CSQueue) application.getQueue(); + String sourceQueueName = csQueue.getQueuePath(); AbstractLeafQueue source = this.queueManager.getAndCheckLeafQueue(sourceQueueName); String destQueueName = handleMoveToPlanQueue(targetQueueName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueHelpers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueHelpers.java index bc7e2b317ac..7e362731f86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueHelpers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueHelpers.java @@ -29,6 +29,7 @@ public final class CapacitySchedulerQueueHelpers { public static final String A = CapacitySchedulerConfiguration.ROOT + ".a"; public static final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + public static final String A_CHILD = A + ".a"; public static final String A1 = A + ".a1"; public static final String A2 = A + ".a2"; public static final String B1 = B + ".b1"; @@ -89,6 +90,35 @@ public final class CapacitySchedulerQueueHelpers { return conf; } + /** + * @param conf, to be modified + * @return CS configuration which has deleted all children of queue(b) + * root + * / \ + *ab + * / \ + * a1 a2 + */ + public static CapacitySchedulerConfiguration setupQueueConfAmbiguousQueue( + CapacitySchedulerConfiguration 
conf) { + +// Define top-level queues +conf.setQueues(CapacitySchedulerConfiguration.ROOT, +new String[]{"a", "b"}); + +conf.setCapacity(A, A_CAPACITY); +conf.setCapacity(B, B_CAPACITY); + +// Define 2nd-level queues +conf.setQueues(A, new String[]{"a", "a1"}); +conf.setCapacity(A_CHILD, A1_CAPACITY); +conf.setUserLimitFactor(A1, 100.0f); +conf.setCapacity(A1, A2_CAPACITY); +conf.setUserLimitFactor(A2, 100.0f); + +return conf; + } + /** * @param conf, to be modified * @return CS configuration which has deleted all children of queue(b) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop