[hadoop] branch branch-3.2 updated: YARN-10080. Support show app id on localizer thread pool (#4283)

2022-05-18 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6322a774f32 YARN-10080. Support show app id on localizer thread pool 
(#4283)
6322a774f32 is described below

commit 6322a774f32f539859186fa06679a30f531dc134
Author: Ashutosh Gupta 
AuthorDate: Fri May 13 17:41:06 2022 +0100

YARN-10080. Support show app id on localizer thread pool (#4283)

Signed-off-by: Akira Ajisaka 
(cherry picked from commit 6985f9aabebefa86037f7dbc478374392afef990)
(cherry picked from commit d58f9d438fc35288492e0e41b690fe08e94894f0)
---
 .../nodemanager/containermanager/localizer/ContainerLocalizer.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index aeeb215d5f3..25c5be1852b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
@@ -217,7 +217,7 @@ public class ContainerLocalizer {
 
   ExecutorService createDownloadThreadPool() {
 return HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder()
-  .setNameFormat("ContainerLocalizer Downloader").build());
+  .setNameFormat("ContainerLocalizer Downloader-" + localizerId).build());
   }
 
   CompletionService createCompletionService(ExecutorService exec) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: YARN-11152. QueueMetrics is leaking memory when creating a new queue during reinitialisation

2022-05-18 Thread bteke
This is an automated email from the ASF dual-hosted git repository.

bteke pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0e6a6d18809 YARN-11152. QueueMetrics is leaking memory when creating a 
new queue during reinitialisation
0e6a6d18809 is described below

commit 0e6a6d18809c1958e3aaae88f0d3ce5bf380b350
Author: 9uapaw 
AuthorDate: Mon May 16 10:40:46 2022 +0200

YARN-11152. QueueMetrics is leaking memory when creating a new queue during 
reinitialisation
---
 .../scheduler/PartitionQueueMetrics.java   |  1 +
 .../resourcemanager/scheduler/QueueMetrics.java| 31 ++-
 .../scheduler/capacity/AbstractCSQueue.java|  1 +
 .../capacity/CapacitySchedulerQueueHelpers.java| 29 +++
 .../scheduler/capacity/TestCapacityScheduler.java  | 60 ++
 5 files changed, 121 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PartitionQueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PartitionQueueMetrics.java
index f43131809a0..02eaa7bd9b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PartitionQueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/PartitionQueueMetrics.java
@@ -40,6 +40,7 @@ public class PartitionQueueMetrics extends QueueMetrics {
   String parentMetricName =
   partition + METRIC_NAME_DELIMITER + newQueueName;
   setParent(getQueueMetrics().get(parentMetricName));
+  storedPartitionMetrics = null;
 }
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 6f9b1ab47b2..3e6a1d7d712 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -22,7 +22,9 @@ import static org.apache.hadoop.metrics2.lib.Interns.info;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -40,12 +42,14 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.metrics.CustomResourceMetricValue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -133,7 +137,7 @@ public class QueueMetrics implements MetricsSource {
   protected final MetricsRegistry registry;
   protected final String queueName;
   private QueueMetrics parent;
-  private final Queue parentQueue;
+  private Queue parentQueue;
   protected final MetricsSystem metricsSystem;
   protected final Map users;
   protected final Configuration conf;
@@ -177,6 +181,7 @@ public class QueueMetrics implements MetricsSource {
 "AggregatePreemptedSeconds.";
   private static final String AGGREGATE_PREEMPTED_SECONDS_METRIC_DESC =
 "Aggregate Preempted Seconds for NAME";
+  protected Set storedPartitionMetrics = Sets.newConcurrentHashSet();
 
   public QueueMetrics(MetricsSystem ms, String queueName, Queue parent,
   boolean enableUserMetrics, Configuration conf) {
@@ -338,6 +343,7 @@ public class QueueMetrics implements MetricsSource {
   queueMetrics.tag(PARTITION_INFO, partitionJMXStr).tag(QUEUE_INFO,
   

[hadoop] branch trunk updated (0b32c6c113c -> 54cd0174c0b)

2022-05-18 Thread bteke
This is an automated email from the ASF dual-hosted git repository.

bteke pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 0b32c6c113c HDFS-16540. Addendum: Data locality is lost when DataNode 
pod restarts in kubernetes. (#4326)
 add 54cd0174c0b YARN-11147. ResourceUsage and QueueCapacities classes 
provide node label iterators that are not thread safe

No new revisions were added by this update.

Summary of changes:
 .../monitor/capacity/FifoIntraQueuePreemptionPlugin.java | 4 +---
 .../server/resourcemanager/scheduler/AbstractResourceUsage.java  | 5 +++--
 .../resourcemanager/scheduler/capacity/AbstractCSQueue.java  | 4 ++--
 .../resourcemanager/scheduler/capacity/AbstractLeafQueue.java| 4 ++--
 .../server/resourcemanager/scheduler/capacity/CSQueueUtils.java  | 6 ++
 .../resourcemanager/scheduler/capacity/QueueCapacities.java  | 9 -
 .../server/resourcemanager/scheduler/capacity/UsersManager.java  | 4 ++--
 .../yarn/server/resourcemanager/webapp/dao/ResourcesInfo.java| 2 +-
 8 files changed, 13 insertions(+), 25 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-16540. Addendum: Data locality is lost when DataNode pod restarts in kubernetes. (#4326)

2022-05-18 Thread stack
This is an automated email from the ASF dual-hosted git repository.

stack pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0b32c6c113c HDFS-16540. Addendum: Data locality is lost when DataNode 
pod restarts in kubernetes. (#4326)
0b32c6c113c is described below

commit 0b32c6c113ceffb45a4d4d665936dc3ee4eba6b4
Author: Hexiaoqiao 
AuthorDate: Wed May 18 23:28:45 2022 +0800

HDFS-16540. Addendum: Data locality is lost when DataNode pod restarts in 
kubernetes. (#4326)
---
 .BUILDING.txt.swp | Bin 16384 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/.BUILDING.txt.swp b/.BUILDING.txt.swp
deleted file mode 100644
index 1fb0c25d0a5..000
Binary files a/.BUILDING.txt.swp and /dev/null differ


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-3.3 updated: YARN-11141. Capacity Scheduler does not support ambiguous queue names when moving application across queues. Contributed by Andras Gyori

2022-05-18 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 90ec4418c76 YARN-11141. Capacity Scheduler does not support ambiguous 
queue names when moving application across queues. Contributed by Andras Gyori
90ec4418c76 is described below

commit 90ec4418c769c1aefe236d9cb79ce3c3da1c2601
Author: Szilard Nemeth 
AuthorDate: Wed May 18 14:34:08 2022 +0200

YARN-11141. Capacity Scheduler does not support ambiguous queue names when 
moving application across queues. Contributed by Andras Gyori
---
 .../resourcemanager/scheduler/capacity/CapacityScheduler.java  | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d0d95c388a6..ba1ab940f5a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2531,7 +2531,12 @@ public class CapacityScheduler extends
   if (application == null) {
 throw new YarnException("App to be moved " + appId + " not found.");
   }
-  String sourceQueueName = application.getQueue().getQueueName();
+  if (!(application.getQueue() instanceof CSQueue)) {
+throw new YarnException("Source queue is not a Capacity Scheduler 
queue");
+  }
+
+  CSQueue csQueue = (CSQueue) application.getQueue();
+  String sourceQueueName = csQueue.getQueuePath();
   LeafQueue source =
   this.queueManager.getAndCheckLeafQueue(sourceQueueName);
   String destQueueName = handleMoveToPlanQueue(targetQueueName);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-3.3 updated: YARN-11126. ZKConfigurationStore Java deserialisation vulnerability. Contributed by Tamas Domok

2022-05-18 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 4f112e3138c YARN-11126. ZKConfigurationStore Java deserialisation 
vulnerability. Contributed by Tamas Domok
4f112e3138c is described below

commit 4f112e3138cba4f74a00f48b268b4180a3eed4f1
Author: Szilard Nemeth 
AuthorDate: Wed May 18 14:25:35 2022 +0200

YARN-11126. ZKConfigurationStore Java deserialisation vulnerability. 
Contributed by Tamas Domok
---
 .../capacity/conf/ZKConfigurationStore.java|  5 ++--
 .../capacity/conf/TestZKConfigurationStore.java| 35 ++
 2 files changed, 38 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
index 703193b1b29..d55435a4c4b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
 
+import org.apache.commons.io.serialization.ValidatingObjectInputStream;
 import 
org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.slf4j.Logger;
@@ -35,7 +36,6 @@ import org.apache.zookeeper.data.ACL;
 import java.io.IOException;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -314,7 +314,8 @@ public class ZKConfigurationStore extends 
YarnConfigurationStore {
 
   private static Object deserializeObject(byte[] bytes) throws Exception {
 try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
-ObjectInputStream ois = new ObjectInputStream(bais);) {
+ ValidatingObjectInputStream ois = new 
ValidatingObjectInputStream(bais);) {
+  ois.accept(LinkedList.class, LogMutation.class, HashMap.class, 
String.class);
   return ois.readObject();
 }
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
index 880ba77fa51..155996d11fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
@@ -42,15 +42,18 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.Yar
 import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.ByteArrayOutputStream;
 import java.io.ObjectOutputStream;
 import java.util.Arrays;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -67,6 +70,9 @@ public class TestZKConfigurationStore extends
   LoggerFactory.getLogger(TestZKConfigurationStore.class);
 
   private static final int ZK_TIMEOUT_MS = 1;
+  private static final String DESERIALIZATION_VULNERABILITY_FILEPATH =
+  "/tmp/ZK_DESERIALIZATION_VULNERABILITY";
+
   private TestingServer curatorTestingServer;
   private CuratorFramework curatorFramework;
   private ResourceManager rm;
@@ -401,6 +407,35 @@ public class TestZKConfigurationStore extends
 rm2.close();
   }
 
+  @Test(timeout = 3000)
+  @SuppressWarnings("checkstyle:linelength")
+  public void testDeserializationIsNotVulnerable() throws Exception {
+

[hadoop] branch branch-3.2 updated: YARN-11126. ZKConfigurationStore Java deserialisation vulnerability. Contributed by Tamas Domok

2022-05-18 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 45801fba8b0 YARN-11126. ZKConfigurationStore Java deserialisation 
vulnerability. Contributed by Tamas Domok
45801fba8b0 is described below

commit 45801fba8b00257ab32c02a7d1a05948ba687a49
Author: Szilard Nemeth 
AuthorDate: Wed May 18 14:23:56 2022 +0200

YARN-11126. ZKConfigurationStore Java deserialisation vulnerability. 
Contributed by Tamas Domok
---
 .../capacity/conf/ZKConfigurationStore.java|  5 ++--
 .../capacity/conf/TestZKConfigurationStore.java| 35 ++
 2 files changed, 38 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
index 6f3612c28da..cacf32b3911 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ZKConfigurationStore.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.io.serialization.ValidatingObjectInputStream;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -35,7 +36,6 @@ import org.apache.zookeeper.data.ACL;
 import java.io.IOException;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -317,7 +317,8 @@ public class ZKConfigurationStore extends 
YarnConfigurationStore {
 
   private static Object deserializeObject(byte[] bytes) throws Exception {
 try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
-ObjectInputStream ois = new ObjectInputStream(bais);) {
+ ValidatingObjectInputStream ois = new 
ValidatingObjectInputStream(bais);) {
+  ois.accept(LinkedList.class, LogMutation.class, HashMap.class, 
String.class);
   return ois.readObject();
 }
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
index 40454a4621b..cb1da9c8963 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestZKConfigurationStore.java
@@ -41,15 +41,18 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.webapp.dao.QueueConfigInfo;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.ByteArrayOutputStream;
 import java.io.ObjectOutputStream;
 import java.util.Arrays;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -66,6 +69,9 @@ public class TestZKConfigurationStore extends
   LoggerFactory.getLogger(TestZKConfigurationStore.class);
 
   private static final int ZK_TIMEOUT_MS = 1;
+  private static final String DESERIALIZATION_VULNERABILITY_FILEPATH =
+  "/tmp/ZK_DESERIALIZATION_VULNERABILITY";
+
   private TestingServer curatorTestingServer;
   private CuratorFramework curatorFramework;
   private ResourceManager rm;
@@ -398,6 +404,35 @@ public class TestZKConfigurationStore extends
 rm2.close();
   }
 
+  @Test(timeout = 3000)
+  @SuppressWarnings("checkstyle:linelength")
+  public void testDeserializationIsNotVulnerable() throws Exception {
+

[hadoop] branch branch-3.3 updated: YARN-10850. TimelineService v2 lists containers for all attempts when filtering for one. Contributed by Benjamin Teke

2022-05-18 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new b4550b33564 YARN-10850. TimelineService v2 lists containers for all 
attempts when filtering for one. Contributed by Benjamin Teke
b4550b33564 is described below

commit b4550b3356439f012ca6f97383889e236067e689
Author: Szilard Nemeth 
AuthorDate: Wed May 18 14:08:41 2022 +0200

YARN-10850. TimelineService v2 lists containers for all attempts when 
filtering for one. Contributed by Benjamin Teke
---
 .../hadoop/yarn/client/api/impl/AHSv2ClientImpl.java |  5 ++---
 .../client/api/impl/TimelineReaderClientImpl.java| 16 ++--
 .../api/impl/TestTimelineReaderClientImpl.java   | 20 +++-
 3 files changed, 35 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
index b6a0c591c90..5fd3ee21fc5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
@@ -137,9 +137,8 @@ public class AHSv2ClientImpl extends AHSClient {
 ApplicationId appId = applicationAttemptId.getApplicationId();
 ApplicationReport appReport = getApplicationReport(appId);
 Map filters = new HashMap<>();
-filters.put("infofilters", "SYSTEM_INFO_PARENT_ENTITY eq {\"id\":\"" +
-applicationAttemptId.toString() +
-"\",\"type\":\"YARN_APPLICATION_ATTEMPT\"}");
+filters.put("infofilters", "SYSTEM_INFO_PARENT_ENTITY eq "
++ "{\"type\":\"YARN_APPLICATION_ATTEMPT\",\"id\":\"" + 
applicationAttemptId + "\"}");
 List entities = readerClient.getContainerEntities(
 appId, "ALL", filters, 0, null);
 List containers =
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
index d5af8a0df56..de30fb84be0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
@@ -38,7 +38,10 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.MultivaluedMap;
 import java.io.IOException;
+import java.io.UnsupportedEncodingException;
 import java.net.URI;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
@@ -208,12 +211,21 @@ public class TimelineReaderClientImpl extends 
TimelineReaderClient {
 return Arrays.asList(entity);
   }
 
+  @VisibleForTesting
+  protected String encodeValue(String value) throws 
UnsupportedEncodingException {
+// Since URLEncoder doesn't use and doesn't have an option for 
percent-encoding
+// (as specified in RFC 3986) the spaces are encoded to + signs, which 
need to be replaced
+// manually
+return URLEncoder.encode(value, StandardCharsets.UTF_8.toString())
+.replaceAll("\\+", "%20");
+  }
+
   private void mergeFilters(MultivaluedMap defaults,
-  Map filters) {
+Map filters) throws 
UnsupportedEncodingException {
 if (filters != null && !filters.isEmpty()) {
   for (Map.Entry entry : filters.entrySet()) {
 if (!defaults.containsKey(entry.getKey())) {
-  defaults.add(entry.getKey(), filters.get(entry.getValue()));
+  defaults.add(entry.getKey(), encodeValue(entry.getValue()));
 }
   }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
index f668472256a..757aeb8c31d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
@@ -25,6 +25,7 @@ import static org.mockito.Mockito.when;
 
 import com.sun.jersey.api.client.ClientResponse;
 import 

[hadoop] branch branch-3.2 updated: YARN-10850. TimelineService v2 lists containers for all attempts when filtering for one. Contributed by Benjamin Teke

2022-05-18 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 1ae834fe0b1 YARN-10850. TimelineService v2 lists containers for all 
attempts when filtering for one. Contributed by Benjamin Teke
1ae834fe0b1 is described below

commit 1ae834fe0b1efb28a4fec56e9532b88864392cbe
Author: Szilard Nemeth 
AuthorDate: Wed May 18 14:04:51 2022 +0200

YARN-10850. TimelineService v2 lists containers for all attempts when 
filtering for one. Contributed by Benjamin Teke
---
 .../hadoop/yarn/client/api/impl/AHSv2ClientImpl.java |  5 ++---
 .../client/api/impl/TimelineReaderClientImpl.java| 16 ++--
 .../api/impl/TestTimelineReaderClientImpl.java   | 20 +++-
 3 files changed, 35 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
index e797c281625..3422493aa1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSv2ClientImpl.java
@@ -129,9 +129,8 @@ public class AHSv2ClientImpl extends AHSClient {
   applicationAttemptId) throws  YarnException, IOException {
 ApplicationId appId = applicationAttemptId.getApplicationId();
 Map filters = new HashMap<>();
-filters.put("infofilters", "SYSTEM_INFO_PARENT_ENTITY eq {\"id\":\"" +
-applicationAttemptId.toString() +
-"\",\"type\":\"YARN_APPLICATION_ATTEMPT\"}");
+filters.put("infofilters", "SYSTEM_INFO_PARENT_ENTITY eq "
++ "{\"type\":\"YARN_APPLICATION_ATTEMPT\",\"id\":\"" + 
applicationAttemptId + "\"}");
 List entities = readerClient.getContainerEntities(
 appId, "ALL", filters, 0, null);
 List containers =
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
index db53f93136f..aafe4c3db11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineReaderClientImpl.java
@@ -38,7 +38,10 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.MultivaluedMap;
 import java.io.IOException;
+import java.io.UnsupportedEncodingException;
 import java.net.URI;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
@@ -208,12 +211,21 @@ public class TimelineReaderClientImpl extends 
TimelineReaderClient {
 return Arrays.asList(entity);
   }
 
+  @VisibleForTesting
+  protected String encodeValue(String value) throws 
UnsupportedEncodingException {
+// Since URLEncoder doesn't use and doesn't have an option for 
percent-encoding
+// (as specified in RFC 3986) the spaces are encoded to + signs, which 
need to be replaced
+// manually
+return URLEncoder.encode(value, StandardCharsets.UTF_8.toString())
+.replaceAll("\\+", "%20");
+  }
+
   private void mergeFilters(MultivaluedMap defaults,
-  Map filters) {
+Map filters) throws 
UnsupportedEncodingException {
 if (filters != null && !filters.isEmpty()) {
   for (Map.Entry entry : filters.entrySet()) {
 if (!defaults.containsKey(entry.getKey())) {
-  defaults.add(entry.getKey(), filters.get(entry.getValue()));
+  defaults.add(entry.getKey(), encodeValue(entry.getValue()));
 }
   }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
index f668472256a..d6ca25f9501 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineReaderClientImpl.java
@@ -23,6 +23,7 @@ import static 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityT
 import static