hadoop git commit: YARN-8921. SnapshotBasedOverAllocationPolicy always caps the amount of memory available to 4 GBs (haibochen via rkanter)

2018-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 da7722bda -> f3d08c76f


YARN-8921. SnapshotBasedOverAllocationPolicy always caps the amount of memory 
available to 4 GBs (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3d08c76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3d08c76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3d08c76

Branch: refs/heads/YARN-1011
Commit: f3d08c76f3983a7646d60d51d292af616a700662
Parents: da7722b
Author: Robert Kanter 
Authored: Fri Oct 26 09:57:10 2018 -0700
Committer: Robert Kanter 
Committed: Fri Oct 26 09:57:10 2018 -0700

--
 .../SnapshotBasedOverAllocationPolicy.java  |  19 ++-
 .../TestSnapshotBasedOverAllocationPolicy.java  | 153 +++
 2 files changed, 167 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3d08c76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/SnapshotBasedOverAllocationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/SnapshotBasedOverAllocationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/SnapshotBasedOverAllocationPolicy.java
index f486506..1a3ebca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/SnapshotBasedOverAllocationPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/SnapshotBasedOverAllocationPolicy.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.records.ResourceThresholds;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
  * An implementation of NMAllocationPolicy based on the
@@ -42,13 +43,21 @@ public class SnapshotBasedOverAllocationPolicy
   public Resource getAvailableResources() {
 ResourceUtilization utilization =
 containersMonitor.getContainersUtilization(true).getUtilization();
-long memoryAvailable = Math.round(
-overAllocationThresholds.getMemoryThreshold() *
-containersMonitor.getPmemAllocatedForContainers()) -
-(utilization.getPhysicalMemory() << 20);
+
+long memoryOverAllocationThresholdBytes =
+Math.round(
+((double) overAllocationThresholds.getMemoryThreshold()) *
+containersMonitor.getPmemAllocatedForContainers());
+long memoryUtilizationBytes =
+((long) utilization.getPhysicalMemory()) << 20;
+long memoryAvailable =
+memoryOverAllocationThresholdBytes - memoryUtilizationBytes;
+
 int vcoreAvailable = Math.round(
 (overAllocationThresholds.getCpuThreshold() - utilization.getCPU()) *
 containersMonitor.getVCoresAllocatedForContainers());
-return Resource.newInstance(memoryAvailable >> 20, vcoreAvailable);
+
+return (memoryAvailable <= 0 || vcoreAvailable <= 0) ? Resources.none() :
+Resource.newInstance(memoryAvailable >> 20, vcoreAvailable);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3d08c76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestSnapshotBasedOverAllocationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestSnapshotBasedOverAllocationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestSnapshotBasedOverAllocationPolicy.java
new file mode 100644
index 000..7900a61
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestSnapshotBasedOverAllocationPolicy.java
@@

hadoop git commit: YARN-8930. CGroup-based strict container memory enforcement does not work with CGroupElasticMemoryController (haibochen via rkanter)

2018-10-25 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk fb2b72e6f -> f76e3c3db


YARN-8930. CGroup-based strict container memory enforcement does not work with 
CGroupElasticMemoryController (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f76e3c3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f76e3c3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f76e3c3d

Branch: refs/heads/trunk
Commit: f76e3c3db789dd6866fa0fef8e014cbfe8c8f80d
Parents: fb2b72e
Author: Robert Kanter 
Authored: Thu Oct 25 10:43:36 2018 -0700
Committer: Robert Kanter 
Committed: Thu Oct 25 11:09:47 2018 -0700

--
 .../CGroupsMemoryResourceHandlerImpl.java   |  25 
 .../linux/resources/MemoryResourceHandler.java  |  10 --
 .../monitor/ContainersMonitorImpl.java  | 116 ---
 .../TestCGroupsMemoryResourceHandlerImpl.java   |  44 ---
 .../site/markdown/NodeManagerCGroupsMemory.md   |  12 +-
 5 files changed, 60 insertions(+), 147 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76e3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
index 053b796..ee5ce2a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
@@ -34,9 +34,6 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileg
 import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Optional;
-
-import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
 
 /**
  * Handler class to handle the memory controller. YARN already ships a
@@ -174,26 +171,4 @@ public class CGroupsMemoryResourceHandlerImpl implements 
MemoryResourceHandler {
   public List teardown() throws ResourceHandlerException {
 return null;
   }
-
-  @Override
-  public Optional isUnderOOM(ContainerId containerId) {
-try {
-  String status = cGroupsHandler.getCGroupParam(
-  CGroupsHandler.CGroupController.MEMORY,
-  containerId.toString(),
-  CGROUP_PARAM_MEMORY_OOM_CONTROL);
-  if (LOG.isDebugEnabled()) {
-LOG.debug("cgroups OOM status for " + containerId + ": " + status);
-  }
-  if (status.contains(CGroupsHandler.UNDER_OOM)) {
-LOG.warn("Container " + containerId + " under OOM based on cgroups.");
-return Optional.of(true);
-  } else {
-return Optional.of(false);
-  }
-} catch (ResourceHandlerException e) {
-  LOG.warn("Could not read cgroups" + containerId, e);
-}
-return Optional.empty();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76e3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
index 1729fc1..013a49f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yar

hadoop git commit: YARN-8929. DefaultOOMHandler should only pick running containers to kill upon oom events (haibochen via rkanter)

2018-10-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk ebf8e1731 -> 69b328943


YARN-8929. DefaultOOMHandler should only pick running containers to kill upon 
oom events (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69b32894
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69b32894
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69b32894

Branch: refs/heads/trunk
Commit: 69b328943edf2f61c8fc139934420e3f10bf3813
Parents: ebf8e17
Author: Robert Kanter 
Authored: Wed Oct 24 13:15:50 2018 -0700
Committer: Robert Kanter 
Committed: Wed Oct 24 13:15:50 2018 -0700

--
 .../linux/resources/CGroupsHandler.java |   2 +-
 .../linux/resources/CGroupsHandlerImpl.java |   4 +-
 .../linux/resources/DefaultOOMHandler.java  |  45 +-
 .../linux/resources/TestCGroupsHandlerImpl.java |   2 +-
 .../linux/resources/TestDefaultOOMHandler.java  | 434 +++
 5 files changed, 389 insertions(+), 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b32894/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
index 9dc16c3..dcb0589 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
@@ -71,7 +71,7 @@ public interface CGroupsHandler {
 }
   }
 
-  String CGROUP_FILE_TASKS = "tasks";
+  String CGROUP_PROCS_FILE = "cgroup.procs";
   String CGROUP_PARAM_CLASSID = "classid";
   String CGROUP_PARAM_BLKIO_WEIGHT = "weight";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b32894/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index a547e8f..050d0a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -347,7 +347,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   public String getPathForCGroupTasks(CGroupController controller,
   String cGroupId) {
 return getPathForCGroup(controller, cGroupId)
-+ Path.SEPARATOR + CGROUP_FILE_TASKS;
++ Path.SEPARATOR + CGROUP_PROCS_FILE;
   }
 
   @Override
@@ -603,7 +603,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   public String getCGroupParam(CGroupController controller, String cGroupId,
   String param) throws ResourceHandlerException {
 String cGroupParamPath =
-param.equals(CGROUP_FILE_TASKS) ?
+param.equals(CGROUP_PROCS_FILE) ?
 getPathForCGroup(controller, cGroupId)
 + Path.SEPARATOR + param :
 getPathForCGroupParam(controller, cGroupId, param);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69b32894/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/h

hadoop git commit: YARN-8919. Some tests fail due to NoClassDefFoundError for OperatorCreationException (tasanuma0829 via rkanter)

2018-10-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 93fb3b4b9 -> 5e294e950


YARN-8919. Some tests fail due to NoClassDefFoundError for 
OperatorCreationException (tasanuma0829 via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e294e95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e294e95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e294e95

Branch: refs/heads/trunk
Commit: 5e294e950b9d1b6f852f2d3467a9809a82f9860e
Parents: 93fb3b4
Author: Robert Kanter 
Authored: Tue Oct 23 13:37:17 2018 -0700
Committer: Robert Kanter 
Committed: Tue Oct 23 13:37:17 2018 -0700

--
 hadoop-tools/hadoop-extras/pom.xml| 10 ++
 hadoop-tools/hadoop-gridmix/pom.xml   | 10 ++
 hadoop-tools/hadoop-streaming/pom.xml | 10 ++
 3 files changed, 30 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e294e95/hadoop-tools/hadoop-extras/pom.xml
--
diff --git a/hadoop-tools/hadoop-extras/pom.xml 
b/hadoop-tools/hadoop-extras/pom.xml
index 270696a..0a18625 100644
--- a/hadoop-tools/hadoop-extras/pom.xml
+++ b/hadoop-tools/hadoop-extras/pom.xml
@@ -108,6 +108,16 @@
cglib
test
 
+
+  org.bouncycastle
+  bcprov-jdk15on
+  test
+
+
+  org.bouncycastle
+  bcpkix-jdk15on
+  test
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e294e95/hadoop-tools/hadoop-gridmix/pom.xml
--
diff --git a/hadoop-tools/hadoop-gridmix/pom.xml 
b/hadoop-tools/hadoop-gridmix/pom.xml
index 857d56c..2af0a3c 100644
--- a/hadoop-tools/hadoop-gridmix/pom.xml
+++ b/hadoop-tools/hadoop-gridmix/pom.xml
@@ -108,6 +108,16 @@
   mockito-all
   test
 
+
+  org.bouncycastle
+  bcprov-jdk15on
+  test
+
+
+  org.bouncycastle
+  bcpkix-jdk15on
+  test
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e294e95/hadoop-tools/hadoop-streaming/pom.xml
--
diff --git a/hadoop-tools/hadoop-streaming/pom.xml 
b/hadoop-tools/hadoop-streaming/pom.xml
index 1ab024e..9c7865b 100644
--- a/hadoop-tools/hadoop-streaming/pom.xml
+++ b/hadoop-tools/hadoop-streaming/pom.xml
@@ -114,6 +114,16 @@
   cglib
   test
 
+
+  org.bouncycastle
+  bcprov-jdk15on
+  test
+
+
+  org.bouncycastle
+  bcpkix-jdk15on
+  test
+
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15853. TestConfigurationDeprecation leaves behind a temp file, resulting in a license issue (ayushtkn via rkanter)

2018-10-15 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk ef9dc6c44 -> 7fe1a40a6


HADOOP-15853. TestConfigurationDeprecation leaves behind a temp file, resulting 
in a license issue (ayushtkn via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fe1a40a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fe1a40a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fe1a40a

Branch: refs/heads/trunk
Commit: 7fe1a40a6ba692ce5907b96db3a7cb3639c091bd
Parents: ef9dc6c
Author: Robert Kanter 
Authored: Mon Oct 15 15:02:37 2018 -0700
Committer: Robert Kanter 
Committed: Mon Oct 15 15:02:37 2018 -0700

--
 .../java/org/apache/hadoop/conf/TestConfigurationDeprecation.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe1a40a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
index 4014b60..efb8131 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
@@ -77,6 +77,7 @@ public class TestConfigurationDeprecation {
 new File(CONFIG).delete();
 new File(CONFIG2).delete();
 new File(CONFIG3).delete();
+new File(CONFIG4).delete();
   }
   
   private void startConfig() throws IOException{


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15832. Addendum: Upgrade BouncyCastle to 1.60. Contributed by Robert Kanter

2018-10-12 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ec86b445 -> f1342cdf4


HADOOP-15832. Addendum: Upgrade BouncyCastle to 1.60. Contributed by Robert 
Kanter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1342cdf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1342cdf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1342cdf

Branch: refs/heads/trunk
Commit: f1342cdf415bdf5d7f686220ff62bb51006f6874
Parents: 5ec86b4
Author: Robert Kanter 
Authored: Fri Oct 12 09:40:34 2018 -0700
Committer: Robert Kanter 
Committed: Fri Oct 12 09:40:34 2018 -0700

--
 README.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1342cdf/README.txt
--
diff --git a/README.txt b/README.txt
index 559099b..f11b1b4 100644
--- a/README.txt
+++ b/README.txt
@@ -29,3 +29,5 @@ The following provides more details on the included 
cryptographic
 software:
   Hadoop Core uses the SSL libraries from the Jetty project written 
 by mortbay.org.
+  Hadoop Yarn Server Web Proxy uses the BouncyCastle Java
+cryptography APIs written by the Legion of the Bouncy Castle Inc.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15717. TGT renewal thread does not log IOException (snemeth via rkanter)

2018-10-11 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 64f2b32d5 -> d91d47bc7


HADOOP-15717. TGT renewal thread does not log IOException (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d91d47bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d91d47bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d91d47bc

Branch: refs/heads/trunk
Commit: d91d47bc739f23ca22a7e44fc83d449db57ab130
Parents: 64f2b32
Author: Robert Kanter 
Authored: Thu Oct 11 15:34:41 2018 -0700
Committer: Robert Kanter 
Committed: Thu Oct 11 15:35:44 2018 -0700

--
 .../hadoop/security/UserGroupInformation.java | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d91d47bc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index db88106..915d6df 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -920,8 +920,8 @@ public class UserGroupInformation {
   final long now = Time.now();
 
   if (tgt.isDestroyed()) {
-LOG.error("TGT is destroyed. Aborting renew thread for {}.",
-getUserName());
+LOG.error(String.format("TGT is destroyed. " +
+"Aborting renew thread for %s.", getUserName()), ie);
 return;
   }
 
@@ -933,16 +933,18 @@ public class UserGroupInformation {
   try {
 tgtEndTime = tgt.getEndTime().getTime();
   } catch (NullPointerException npe) {
-LOG.error("NPE thrown while getting KerberosTicket endTime. "
-+ "Aborting renew thread for {}.", getUserName());
+LOG.error(String.format("NPE thrown while getting " +
+"KerberosTicket endTime. Aborting renew thread for %s.",
+getUserName()), ie);
 return;
   }
 
-  LOG.warn("Exception encountered while running the renewal "
-  + "command for {}. (TGT end time:{}, renewalFailures: {},"
-  + "renewalFailuresTotal: {})", getUserName(), tgtEndTime,
+  LOG.warn(String.format("Exception encountered while running the " +
+  "renewal command for %s. " +
+  "(TGT end time:%d, renewalFailures: %d, " +
+  "renewalFailuresTotal: %d)", getUserName(), tgtEndTime,
   metrics.renewalFailures.value(),
-  metrics.renewalFailuresTotal.value(), ie);
+  metrics.renewalFailuresTotal.value()), ie);
   if (rp == null) {
 // Use a dummy maxRetries to create the policy. The policy will
 // only be used to get next retry time with exponential back-off.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15708. Reading values from Configuration before adding deprecations make it impossible to read value with deprecated key (zsiegl via rkanter)

2018-10-10 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2bd000c85 -> f261c3193


HADOOP-15708. Reading values from Configuration before adding deprecations make 
it impossible to read value with deprecated key (zsiegl via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f261c319
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f261c319
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f261c319

Branch: refs/heads/trunk
Commit: f261c319375c5a8c298338752ee77214c22f4e29
Parents: 2bd000c
Author: Robert Kanter 
Authored: Wed Oct 10 18:51:37 2018 -0700
Committer: Robert Kanter 
Committed: Wed Oct 10 18:51:37 2018 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 26 +-
 .../conf/TestConfigurationDeprecation.java  | 88 ++--
 2 files changed, 88 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f261c319/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index a78e311..c004cb5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -709,6 +709,9 @@ public class Configuration implements 
Iterable>,
* deprecated key, the value of the deprecated key is set as the value for
* the provided property name.
*
+   * Also updates properties and overlays with deprecated keys, if the new
+   * key does not already exist.
+   *
* @param deprecations deprecation context
* @param name the property name
* @return the first property in the list of properties mapping
@@ -730,6 +733,11 @@ public class Configuration implements 
Iterable>,
   // Override return value for deprecated keys
   names = keyInfo.newKeys;
 }
+
+// Update properties with deprecated key if already loaded and new
+// deprecation has been added
+updatePropertiesWithDeprecatedKeys(deprecations, names);
+
 // If there are no overlay values we can return early
 Properties overlayProperties = getOverlay();
 if (overlayProperties.isEmpty()) {
@@ -748,6 +756,19 @@ public class Configuration implements 
Iterable>,
 }
 return names;
   }
+
+  private void updatePropertiesWithDeprecatedKeys(
+  DeprecationContext deprecations, String[] newNames) {
+for (String newName : newNames) {
+  String deprecatedKey = 
deprecations.getReverseDeprecatedKeyMap().get(newName);
+  if (deprecatedKey != null && !getProps().containsKey(newName)) {
+String deprecatedValue = getProps().getProperty(deprecatedKey);
+if (deprecatedValue != null) {
+  getProps().setProperty(newName, deprecatedValue);
+}
+  }
+}
+  }
  
   private void handleDeprecation() {
 LOG.debug("Handling deprecation for all properties in config...");
@@ -1187,7 +1208,10 @@ public class Configuration implements 
Iterable>,
* the first key which replaces the deprecated key and is not null.
* 
* Values are processed for variable 
expansion 
-   * before being returned. 
+   * before being returned.
+   *
+   * As a side effect get loads the properties from the sources if called for
+   * the first time as a lazy init.
* 
* @param name the property name, will be trimmed before get value.
* @return the value of the name or its replacing property, 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f261c319/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
index 93a235a..4014b60 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationDeprecation.java
@@ -38,6 +38,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.junit.Assert;
 
 import org.apache.hadoop.fs.Path;
@@ -52,9 +53,14 @@ import com.google.common

hadoop git commit: YARN-8813. Improve debug messages for NM preemption of OPPORTUNISTIC containers (haibochen via rkanter)

2018-10-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 ad642186a -> bb5991423


YARN-8813. Improve debug messages for NM preemption of OPPORTUNISTIC containers 
(haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb599142
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb599142
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb599142

Branch: refs/heads/YARN-1011
Commit: bb59914237b76cb7409b63da27b21a07a4481942
Parents: ad64218
Author: Robert Kanter 
Authored: Tue Oct 9 16:15:35 2018 -0700
Committer: Robert Kanter 
Committed: Tue Oct 9 16:15:35 2018 -0700

--
 .../linux/resources/CGroupElasticMemoryController.java | 13 -
 .../linux/resources/DefaultOOMHandler.java |  3 +++
 .../monitor/ContainersMonitorImpl.java |  4 
 .../SnapshotBasedOverAllocationPreemptionPolicy.java   | 12 
 4 files changed, 27 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb599142/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
index 752c3a6..b47edbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
@@ -94,6 +94,7 @@ public class CGroupElasticMemoryController extends Thread {
 boolean controlVirtual = controlVirtualMemory && !controlPhysicalMemory;
 Runnable oomHandlerTemp =
 getDefaultOOMHandler(conf, context, oomHandlerOverride, 
controlVirtual);
+LOG.info("Using OOMHandler: " + oomHandlerTemp.getClass().getName());
 if (controlPhysicalMemory && controlVirtualMemory) {
   LOG.warn(
   NM_ELASTIC_MEMORY_CONTROL_ENABLED + " is on. " +
@@ -138,11 +139,10 @@ public class CGroupElasticMemoryController extends Thread 
{
   Configuration conf, Context context, Runnable oomHandlerLocal,
   boolean controlVirtual)
   throws YarnException {
-Class oomHandlerClass =
-conf.getClass(
-YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER,
-DefaultOOMHandler.class);
 if (oomHandlerLocal == null) {
+  Class oomHandlerClass = conf.getClass(
+  YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER,
+  DefaultOOMHandler.class);
   try {
 Constructor constr = oomHandlerClass.getConstructor(
 Context.class, boolean.class);
@@ -284,12 +284,15 @@ public class CGroupElasticMemoryController extends Thread 
{
   // This loop can be exited by terminating the process
   // with stopListening()
   while ((read = events.read(event)) == event.length) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("OOM event notification received from oom-listener");
+}
 // An OOM event has occurred
 resolveOOM(executor);
   }
 
   if (read != -1) {
-LOG.warn(String.format("Characters returned from event hander: %d",
+LOG.warn(String.format("Characters returned from event handler: %d",
 read));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb599142/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java

hadoop git commit: YARN-8807. FairScheduler crashes RM with oversubscription turned on if an application is killed. (haibochen via rkanter)

2018-10-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 efd852449 -> ad642186a


YARN-8807. FairScheduler crashes RM with oversubscription turned on if an 
application is killed. (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad642186
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad642186
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad642186

Branch: refs/heads/YARN-1011
Commit: ad642186a908b3b75afb4bc6270177564a8d75d6
Parents: efd8524
Author: Robert Kanter 
Authored: Tue Oct 9 14:15:54 2018 -0700
Committer: Robert Kanter 
Committed: Tue Oct 9 14:15:54 2018 -0700

--
 .../scheduler/fair/FairScheduler.java   |   9 +-
 .../scheduler/fair/TestFairScheduler.java   | 105 +++
 2 files changed, 110 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad642186/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 744776a..44aad67 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1162,10 +1162,11 @@ public class FairScheduler extends
 for (RMContainer rmContainer : promoted)  {
   FSAppAttempt appAttempt = getSchedulerApp(
   rmContainer.getApplicationAttemptId());
-  appAttempt.opportunisticContainerPromoted(rmContainer);
-
-  promotion.put(rmContainer.getContainer(),
-  ContainerUpdateType.PROMOTE_EXECUTION_TYPE);
+  if (appAttempt != null) {
+appAttempt.opportunisticContainerPromoted(rmContainer);
+promotion.put(rmContainer.getContainer(),
+ContainerUpdateType.PROMOTE_EXECUTION_TYPE);
+  }
 }
 
 if (!promotion.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad642186/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 5847ca9..ec4f082 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -119,6 +119,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtil
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
@@ -3948,6 +3949,110 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
   }
 
   @Test
+  public void testKillingApplicationWithOpportunisticContainersAssigned()
+  throws IOException {
+conf.setBoolean(YarnConfiguration.RM_SCHEDULER_OVERSUBSCRIPTION_ENABLED,
+true);
+// disable resou

hadoop git commit: Changed version in trunk to 3.3.0-SNAPSHOT addendum

2018-10-02 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk e8b860431 -> 96ae4ac45


Changed version in trunk to 3.3.0-SNAPSHOT addendum


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96ae4ac4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96ae4ac4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96ae4ac4

Branch: refs/heads/trunk
Commit: 96ae4ac45fe84b3da696a7beb3b6590af031543b
Parents: e8b8604
Author: Robert Kanter 
Authored: Tue Oct 2 16:48:59 2018 -0700
Committer: Robert Kanter 
Committed: Tue Oct 2 16:48:59 2018 -0700

--
 hadoop-project/pom.xml | 2 +-
 pom.xml| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96ae4ac4/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index cd38376..49551a6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -50,7 +50,7 @@
 
 0.8.2.1
 
-3.2.0-SNAPSHOT
+3.3.0-SNAPSHOT
 1.0.13
 
 ${project.build.directory}/test-dir

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96ae4ac4/pom.xml
--
diff --git a/pom.xml b/pom.xml
index b2606dd..6c8b132 100644
--- a/pom.xml
+++ b/pom.xml
@@ -80,7 +80,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 
   
 
-3.2.0-SNAPSHOT
+3.3.0-SNAPSHOT
 
 apache.snapshots.https
 Apache Development Snapshot 
Repository


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8568. Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md (bsteinbach via rkanter)

2018-08-14 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c72674ee3 -> fd63be750


YARN-8568. Replace the deprecated zk-address property in the HA config example 
in ResourceManagerHA.md (bsteinbach via rkanter)

(cherry picked from commit 8478732bb28e9e71061d6b4a043a3a1b5c688902)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd63be75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd63be75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd63be75

Branch: refs/heads/branch-3.0
Commit: fd63be750ace70da3361960ec239ba80fc335015
Parents: c72674e
Author: Robert Kanter 
Authored: Wed Aug 8 15:08:55 2018 -0700
Committer: Robert Kanter 
Committed: Tue Aug 14 10:16:44 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd63be75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
index 61eb773..a9c336d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
@@ -111,7 +111,7 @@ Here is the sample of minimal setup for RM failover.
   master2:8088
 
 
-  yarn.resourcemanager.zk-address
+  hadoop.zk.address
   zk1:2181,zk2:2181,zk3:2181
 
 ```


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-4946. RM should not consider an application as COMPLETED when log aggregation is not in a terminal state (snemeth via rkanter)

2018-08-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8244abb7a -> b2517dd66


YARN-4946. RM should not consider an application as COMPLETED when log 
aggregation is not in a terminal state (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2517dd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2517dd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2517dd6

Branch: refs/heads/trunk
Commit: b2517dd66b3c88fdd478411cf208921bd3023755
Parents: 8244abb
Author: Robert Kanter 
Authored: Thu Aug 9 14:58:04 2018 -0700
Committer: Robert Kanter 
Committed: Thu Aug 9 14:58:04 2018 -0700

--
 .../server/resourcemanager/RMAppManager.java|  81 +--
 .../server/resourcemanager/rmapp/RMApp.java |   6 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |   8 +-
 .../server/resourcemanager/TestAppManager.java  | 241 +++
 .../applicationsmanager/MockAsm.java|  11 +
 .../server/resourcemanager/rmapp/MockRMApp.java |  20 ++
 6 files changed, 294 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2517dd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7011aaa..ee78c08 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -86,7 +86,7 @@ public class RMAppManager implements 
EventHandler,
   private int maxCompletedAppsInMemory;
   private int maxCompletedAppsInStateStore;
   protected int completedAppsInStateStore = 0;
-  private LinkedList completedApps = new 
LinkedList();
+  protected LinkedList completedApps = new LinkedList<>();
 
   private final RMContext rmContext;
   private final ApplicationMasterService masterService;
@@ -284,31 +284,72 @@ public class RMAppManager implements 
EventHandler,
* check to see if hit the limit for max # completed apps kept
*/
   protected synchronized void checkAppNumCompletedLimit() {
-// check apps kept in state store.
-while (completedAppsInStateStore > this.maxCompletedAppsInStateStore) {
-  ApplicationId removeId =
-  completedApps.get(completedApps.size() - completedAppsInStateStore);
+if (completedAppsInStateStore > maxCompletedAppsInStateStore) {
+  removeCompletedAppsFromStateStore();
+}
+
+if (completedApps.size() > maxCompletedAppsInMemory) {
+  removeCompletedAppsFromMemory();
+}
+  }
+
+  private void removeCompletedAppsFromStateStore() {
+int numDelete = completedAppsInStateStore - maxCompletedAppsInStateStore;
+for (int i = 0; i < numDelete; i++) {
+  ApplicationId removeId = completedApps.get(i);
   RMApp removeApp = rmContext.getRMApps().get(removeId);
-  LOG.info("Max number of completed apps kept in state store met:"
-  + " maxCompletedAppsInStateStore = " + maxCompletedAppsInStateStore
-  + ", removing app " + removeApp.getApplicationId()
-  + " from state store.");
-  rmContext.getStateStore().removeApplication(removeApp);
-  completedAppsInStateStore--;
+  boolean deleteApp = shouldDeleteApp(removeApp);
+
+  if (deleteApp) {
+LOG.info("Max number of completed apps kept in state store met:"
++ " maxCompletedAppsInStateStore = "
++ maxCompletedAppsInStateStore + ", removing app " + removeId
++ " from state store.");
+rmContext.getStateStore().removeApplication(removeApp);
+completedAppsInStateStore--;
+  } else {
+LOG.info("Max number of completed apps kept in state store met:"
++ " maxCompletedAppsInStateStore = "
++ maxCompletedAppsInStateStore + ", but not removing app "
++ removeId
++ " from state store as log aggregation have not finished yet.");
+  }
 }
+  }
 
-// check apps kept in memorty.
-while (completedApps.size() > this.m

hadoop git commit: YARN-8568. Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md (bsteinbach via rkanter)

2018-08-08 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f16c31148 -> 0ed91309d


YARN-8568. Replace the deprecated zk-address property in the HA config example 
in ResourceManagerHA.md (bsteinbach via rkanter)

(cherry picked from commit 8478732bb28e9e71061d6b4a043a3a1b5c688902)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ed91309
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ed91309
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ed91309

Branch: refs/heads/branch-3.1
Commit: 0ed91309d3e032c8713a922f25db399c962326db
Parents: f16c311
Author: Robert Kanter 
Authored: Wed Aug 8 15:08:55 2018 -0700
Committer: Robert Kanter 
Committed: Wed Aug 8 15:10:52 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ed91309/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
index da9f5a0..ff97328 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
@@ -111,7 +111,7 @@ Here is the sample of minimal setup for RM failover.
   master2:8088
 
 
-  yarn.resourcemanager.zk-address
+  hadoop.zk.address
   zk1:2181,zk2:2181,zk3:2181
 
 ```


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8568. Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md (bsteinbach via rkanter)

2018-08-08 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 7deebb0b7 -> 28b780277


YARN-8568. Replace the deprecated zk-address property in the HA config example 
in ResourceManagerHA.md (bsteinbach via rkanter)

(cherry picked from commit 8478732bb28e9e71061d6b4a043a3a1b5c688902)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28b78027
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28b78027
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28b78027

Branch: refs/heads/branch-3
Commit: 28b7802772f6e4ff55a82c7a4048ea7d0682b6b1
Parents: 7deebb0
Author: Robert Kanter 
Authored: Wed Aug 8 15:08:55 2018 -0700
Committer: Robert Kanter 
Committed: Wed Aug 8 15:09:26 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28b78027/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
index 61eb773..a9c336d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
@@ -111,7 +111,7 @@ Here is the sample of minimal setup for RM failover.
   master2:8088
 
 
-  yarn.resourcemanager.zk-address
+  hadoop.zk.address
   zk1:2181,zk2:2181,zk3:2181
 
 ```


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8568. Replace the deprecated zk-address property in the HA config example in ResourceManagerHA.md (bsteinbach via rkanter)

2018-08-08 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3214cd75a -> 8478732bb


YARN-8568. Replace the deprecated zk-address property in the HA config example 
in ResourceManagerHA.md (bsteinbach via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8478732b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8478732b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8478732b

Branch: refs/heads/trunk
Commit: 8478732bb28e9e71061d6b4a043a3a1b5c688902
Parents: 3214cd7
Author: Robert Kanter 
Authored: Wed Aug 8 15:08:55 2018 -0700
Committer: Robert Kanter 
Committed: Wed Aug 8 15:08:55 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8478732b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
index da9f5a0..ff97328 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerHA.md
@@ -111,7 +111,7 @@ Here is the sample of minimal setup for RM failover.
   master2:8088
 
 
-  yarn.resourcemanager.zk-address
+  hadoop.zk.address
   zk1:2181,zk2:2181,zk3:2181
 
 ```


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

2018-07-31 Thread rkanter
Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via 
rkanter)

(cherry picked from commit 0838fe833738e04f5e6f6408e97866d77bebbf30)
(cherry picked from commit c1dc4ca2c6080377159157ce97bf5d72fa3285a1)
(cherry picked from commit 92f02f97fd8e8306fda7374b5180a633622f9636)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4328a7e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4328a7e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4328a7e0

Branch: refs/heads/branch-2.9
Commit: 4328a7e0ed755883d9cf6c84e1c07b34f6368266
Parents: e20a840
Author: Robert Kanter 
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Robert Kanter 
Committed: Tue Jul 31 13:41:56 2018 -0700

--
 .../impl/container-executor.c   | 30 +++-
 .../test/test-container-executor.c  | 20 +
 2 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4328a7e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index b2c2bab..1b5d0e4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2162,6 +2162,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+strerror(errno));
+return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+if (strcmp(entry->d_name, ".") == 0) {
+  continue;
+}
+if (strcmp(entry->d_name, "..") == 0) {
+  continue;
+}
+fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2196,7 +2218,13 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
 result = -1;
   } else {
 if (strstr(mount_path, "..") != NULL) {
-  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+  mount_path);
+  result = INVALID_COMMAND_PROVIDED;
+  goto cleanup;
+}
+if (!is_empty(mount_path)) {
+  fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
   result = INVALID_COMMAND_PROVIDED;
   goto cleanup;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4328a7e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 468e3c3..dc69ad3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1188,6 +1188,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+printf("FAIL: / should not be empty\n");
+exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+printf("

[1/3] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-07-31 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 3961effa1 -> d9b9c9125


Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

(cherry picked from commit 351cf87c92872d90f62c476f85ae4d02e485769c)
(cherry picked from commit d61d84279f7f22867c23dd95e8bfeb70ea7e0690)
(cherry picked from commit f5fd5aa025c904e9a2ff8c5fd932aaed2363a6a0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e20a8401
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e20a8401
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e20a8401

Branch: refs/heads/branch-2.9
Commit: e20a840174bc2b27fcc0935e0086977bd6fbfcb3
Parents: 3961eff
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Robert Kanter 
Committed: Tue Jul 31 13:41:50 2018 -0700

--
 .../impl/container-executor.c   | 54 ++--
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md |  2 +-
 4 files changed, 55 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e20a8401/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index a5a7e6a..b2c2bab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -70,6 +70,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"mapred", 
"hdfs", "bin", 0};
 
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
 
 //location of traffic control binary
 static const char* TC_BIN = "/sbin/tc";
@@ -469,6 +470,12 @@ int is_tc_support_enabled() {
 DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
+int is_mount_cgroups_support_enabled() {
+return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+  DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED,
+  &executor_cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -2129,20 +2136,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
 while ((ep = readdir(dp)) != NULL) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, 
ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -2166,11 +2178,16 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   char *mount_path = malloc(len);
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
-  struct stat sb;
 
   if (controller == NULL || mount_path == NULL) {
 fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n");
 result = OUT_OF_MEMORY;
+goto cleanup;
+  }
+  if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) {
+fprintf(LOGFILE, "Unsupported cgroup hierarhy path detected.\n");
+result = INVALID_COMMAND_PROVIDED;
+goto cleanup;
   }
   if (get_kv_key(pair, controller, len) < 0 ||
   get_kv_value(pair, mount_path, len) < 0) {
@@ -2178,13 +2195,10 @@ int mount_cgroup(const 

[3/3] hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-07-31 Thread rkanter
YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via 
rkanter)

(cherry picked from commit 1bc106a738a6ce4f7ed025d556bb44c1ede022e3)
(cherry picked from commit 6e0db6fe1a8ce50977175567f2ba1f957e7b9c91)
(cherry picked from commit edb9d8b55419dabf5b8ace678e5ddb5cd559972b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9b9c912
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9b9c912
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9b9c912

Branch: refs/heads/branch-2.9
Commit: d9b9c9125815b20ef63ba65c4c2394c89345be9c
Parents: 4328a7e
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Robert Kanter 
Committed: Tue Jul 31 13:42:01 2018 -0700

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9b9c912/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index dc69ad3..a0e18e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1188,19 +1188,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15644. Hadoop Docker Image Pip Install Fails on branch-2 (haibochen via rkanter)

2018-07-31 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d2ec11743 -> a7163886c


HADOOP-15644. Hadoop Docker Image Pip Install Fails on branch-2 (haibochen via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7163886
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7163886
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7163886

Branch: refs/heads/branch-2
Commit: a7163886ce6b52b9d3d3e1a19b17be49df1957be
Parents: d2ec117
Author: Robert Kanter 
Authored: Tue Jul 31 13:28:49 2018 -0700
Committer: Robert Kanter 
Committed: Tue Jul 31 13:28:49 2018 -0700

--
 dev-support/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7163886/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index b1fc420..026109f 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -122,7 +122,7 @@ RUN apt-get -q install --no-install-recommends -y bats
 
 # Install pylint
 
-RUN pip install pylint
+RUN pip install pylint==1.9.2
 
 
 # Install dateutil.parser


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8517. getContainer and getContainers ResourceManager REST API methods are not documented (snemeth via rkanter)

2018-07-27 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk fecbac499 -> 2cccf4061


YARN-8517. getContainer and getContainers ResourceManager REST API methods are 
not documented (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cccf406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cccf406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cccf406

Branch: refs/heads/trunk
Commit: 2cccf4061cc4021c48e29879700dbc94f832b7d1
Parents: fecbac4
Author: Robert Kanter 
Authored: Fri Jul 27 14:35:03 2018 -0700
Committer: Robert Kanter 
Committed: Fri Jul 27 14:35:03 2018 -0700

--
 .../InvalidResourceRequestException.java|  36 ++
 .../resourcemanager/DefaultAMSProcessor.java|  23 +-
 .../scheduler/SchedulerUtils.java   |  55 +-
 .../scheduler/TestSchedulerUtils.java   | 630 ++-
 4 files changed, 430 insertions(+), 314 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
index f4fd2fa..1ea9eef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
@@ -30,19 +30,55 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
  * 
  */
 public class InvalidResourceRequestException extends YarnException {
+  public static final String LESS_THAN_ZERO_RESOURCE_MESSAGE_TEMPLATE =
+  "Invalid resource request! Cannot allocate containers as "
+  + "requested resource is less than 0! "
+  + "Requested resource type=[%s], " + "Requested resource=%s";
+
+  public static final String GREATER_THAN_MAX_RESOURCE_MESSAGE_TEMPLATE =
+  "Invalid resource request! Cannot allocate containers as "
+  + "requested resource is greater than " +
+  "maximum allowed allocation. "
+  + "Requested resource type=[%s], "
+  + "Requested resource=%s, maximum allowed allocation=%s, "
+  + "please note that maximum allowed allocation is calculated 
"
+  + "by scheduler based on maximum resource of registered "
+  + "NodeManagers, which might be less than configured "
+  + "maximum allocation=%s";
+
+  public static final String UNKNOWN_REASON_MESSAGE_TEMPLATE =
+  "Invalid resource request! "
+  + "Cannot allocate containers for an unknown reason! "
+  + "Requested resource type=[%s], Requested resource=%s";
+
+  public enum InvalidResourceType {
+LESS_THAN_ZERO, GREATER_THEN_MAX_ALLOCATION, UNKNOWN;
+  }
 
   private static final long serialVersionUID = 13498237L;
+  private final InvalidResourceType invalidResourceType;
 
   public InvalidResourceRequestException(Throwable cause) {
 super(cause);
+this.invalidResourceType = InvalidResourceType.UNKNOWN;
   }
 
   public InvalidResourceRequestException(String message) {
+this(message, InvalidResourceType.UNKNOWN);
+  }
+
+  public InvalidResourceRequestException(String message,
+  InvalidResourceType invalidResourceType) {
 super(message);
+this.invalidResourceType = invalidResourceType;
   }
 
   public InvalidResourceRequestException(String message, Throwable cause) {
 super(message, cause);
+this.invalidResourceType = InvalidResourceType.UNKNOWN;
   }
 
+  public InvalidResourceType getInvalidResourceType() {
+return invalidResourceType;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
 
b/hadoop-yarn-project/hadoop-yar

hadoop git commit: YARN-8566. Add diagnostic message for unschedulable containers (snemeth via rkanter)

2018-07-27 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk b429f19d3 -> fecbac499


YARN-8566. Add diagnostic message for unschedulable containers (snemeth via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fecbac49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fecbac49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fecbac49

Branch: refs/heads/trunk
Commit: fecbac499e2ae6b3334773a997d454a518f43e01
Parents: b429f19
Author: Robert Kanter 
Authored: Fri Jul 27 14:32:34 2018 -0700
Committer: Robert Kanter 
Committed: Fri Jul 27 14:32:34 2018 -0700

--
 .../src/site/markdown/ResourceManagerRest.md| 285 +++
 1 file changed, 285 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fecbac49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index a30677c..24c2319 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -2326,6 +2326,291 @@ Response Body:
 
 ```
 
+Containers for an Application Attempt API
+-
+
+With Containers for an Application Attempt API you can obtain the list of 
containers, which belongs to an Application Attempt.
+
+### URI
+
+  * 
http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers
+
+### HTTP Operations Supported
+
+  * GET
+
+### Query Parameters Supported
+
+  None
+
+### Elements of the *containers* object
+
+When you make a request for the list of containers, the information will be 
returned as an array of container objects.
+
+containers:
+
+| Item | Data Type | Description |
+|: |: |: |
+| containers | array of app container objects(JSON)/zero or more container 
objects(XML) | The collection of app container objects |
+
+### Elements of the *container* object
+
+| Item | Data Type | Description |
+|: |: |: |
+| containerId | string | The container id |
+| allocatedMB | long | The amount of memory allocated for the container in MB |
+| allocatedVCores | int | The amount of virtual cores allocated for the 
container |
+| assignedNodeId | string | The node id of the node the attempt ran on |
+| priority | int | Allocated priority of the container |
+| startedTime | long | The start time of the attempt (in ms since epoch) |
+| finishedTime | long | The finish time of the attempt (in ms since epoch) 0 
if not finished |
+| elapsedTime | long | The elapsed time in ms since the startedTime |
+| logUrl | string | The web URL that can be used to check the log for the 
container |
+| containerExitStatus | int | Final exit status of the container |
+| containerState | string | State of the container, can be NEW, RUNNING, or 
COMPLETE |
+| nodeHttpAddress | string | The node http address of the node the attempt ran 
on ||
+| nodeId | string | The node id of the node the attempt ran on |
+| allocatedResources |array of resource(JSON)/zero or more resource 
objects(XML) | Allocated resources for the container |
+
+### Elements of the *resource* object
+| Item | Data Type | Description |
+|: |: |: |
+| memory | int | The maximum memory for the container |
+| vCores | int | The maximum number of vcores for the container |
+
+**JSON response**
+
+HTTP Request:
+
+  GET 
http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/json
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+```json
+{
+  "containers" : {
+"container": [
+  {
+  "containerId": "container_1531404209605_0008_01_01",
+  "allocatedMB": "1536",
+  "allocatedVCores": "1",
+  "assignedNodeId": "host.domain.com:37814",
+  "priority": "0",
+  "startedTime": "1531405909444",
+  "finishedTime": "0",
+  "elapsedTime": "4112",
+  "logUrl": 
"http://host.domain.com:8042/node/containerlogs/container_1531404209605_0008_01_01/systest";,
+  "containerExitStatus": "0",
+  "containerState": "RUNNING",
+  "nodeHttpAddress": "

[3/3] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-07-18 Thread rkanter
Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

(cherry picked from commit 351cf87c92872d90f62c476f85ae4d02e485769c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d61d8427
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d61d8427
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d61d8427

Branch: refs/heads/branch-3.0
Commit: d61d84279f7f22867c23dd95e8bfeb70ea7e0690
Parents: 34afb9f
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:10:57 2018 -0700

--
 .../impl/container-executor.c   | 54 ++--
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md |  2 +-
 4 files changed, 55 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61d8427/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index c09c161..8959f32 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -70,6 +70,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", 
"mapred", "hdfs", "bin", 0}
 
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
 
 //location of traffic control binary
 static const char* TC_BIN = "/sbin/tc";
@@ -469,6 +470,12 @@ int is_tc_support_enabled() {
 DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
+int is_mount_cgroups_support_enabled() {
+return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+  DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED,
+  &executor_cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -2198,20 +2205,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
 while ((ep = readdir(dp)) != NULL) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, 
ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -2235,11 +2247,16 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   char *mount_path = malloc(len);
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
-  struct stat sb;
 
   if (controller == NULL || mount_path == NULL) {
 fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n");
 result = OUT_OF_MEMORY;
+goto cleanup;
+  }
+  if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) {
+fprintf(LOGFILE, "Unsupported cgroup hierarhy path detected.\n");
+result = INVALID_COMMAND_PROVIDED;
+goto cleanup;
   }
   if (get_kv_key(pair, controller, len) < 0 ||
   get_kv_value(pair, mount_path, len) < 0) {
@@ -2247,13 +2264,10 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   pair);
 result = -1;
   } else {
-if (stat(mount_path, &sb) != 0) {
-  // Create mount point, if it does not exist
-  const mode_t mount_pe

[1/3] hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-07-18 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 34afb9f94 -> 6e0db6fe1


YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via 
rkanter)

(cherry picked from commit 1bc106a738a6ce4f7ed025d556bb44c1ede022e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e0db6fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e0db6fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e0db6fe

Branch: refs/heads/branch-3.0
Commit: 6e0db6fe1a8ce50977175567f2ba1f957e7b9c91
Parents: c1dc4ca
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:10:57 2018 -0700

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0db6fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 3979708..235ea77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1197,19 +1197,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

2018-07-18 Thread rkanter
Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via 
rkanter)

(cherry picked from commit 0838fe833738e04f5e6f6408e97866d77bebbf30)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1dc4ca2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1dc4ca2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1dc4ca2

Branch: refs/heads/branch-3.0
Commit: c1dc4ca2c6080377159157ce97bf5d72fa3285a1
Parents: d61d842
Author: Robert Kanter 
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:10:57 2018 -0700

--
 .../impl/container-executor.c   | 30 +++-
 .../test/test-container-executor.c  | 20 +
 2 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1dc4ca2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 8959f32..edb8f33 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2231,6 +2231,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+strerror(errno));
+return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+if (strcmp(entry->d_name, ".") == 0) {
+  continue;
+}
+if (strcmp(entry->d_name, "..") == 0) {
+  continue;
+}
+fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2265,7 +2287,13 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
 result = -1;
   } else {
 if (strstr(mount_path, "..") != NULL) {
-  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+  mount_path);
+  result = INVALID_COMMAND_PROVIDED;
+  goto cleanup;
+}
+if (!is_empty(mount_path)) {
+  fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
   result = INVALID_COMMAND_PROVIDED;
   goto cleanup;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1dc4ca2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 8b0c0fa..3979708 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1197,6 +1197,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+printf("FAIL: / should not be empty\n");
+exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+exit(1);
+  }
+}
+
 // This test is expected to be executed either by a regu

[3/3] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-07-18 Thread rkanter
Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

(cherry picked from commit 351cf87c92872d90f62c476f85ae4d02e485769c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27e2b4b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27e2b4b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27e2b4b3

Branch: refs/heads/branch-3.1
Commit: 27e2b4b36456ea5f42d38329dcc6bee0cb7b7ac0
Parents: d82edec
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:07:48 2018 -0700

--
 .../impl/container-executor.c   | 54 ++--
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md |  2 +-
 4 files changed, 55 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27e2b4b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1b8842a..baf0e8b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -73,6 +73,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", 
"mapred", "hdfs", "bin", 0}
 
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
 
 static const char* PROC_PATH = "/proc";
 
@@ -482,6 +483,12 @@ int is_tc_support_enabled() {
 DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
+int is_mount_cgroups_support_enabled() {
+return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+  DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED,
+  &executor_cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -2346,20 +2353,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
 while ((ep = readdir(dp)) != NULL) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, 
ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -2383,11 +2395,16 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   char *mount_path = malloc(len);
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
-  struct stat sb;
 
   if (controller == NULL || mount_path == NULL) {
 fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n");
 result = OUT_OF_MEMORY;
+goto cleanup;
+  }
+  if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) {
+fprintf(LOGFILE, "Unsupported cgroup hierarhy path detected.\n");
+result = INVALID_COMMAND_PROVIDED;
+goto cleanup;
   }
   if (get_kv_key(pair, controller, len) < 0 ||
   get_kv_value(pair, mount_path, len) < 0) {
@@ -2395,13 +2412,10 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   pair);
 result = -1;
   } else {
-if (stat(mount_path, &sb) != 0) {
-  // Create mount point, if it does not exist
-  const mode_t mount_perms = S_IRWXU | S_IRGRP | 

[1/3] hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-07-18 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d82edec3c -> dfa71428e


YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via 
rkanter)

(cherry picked from commit 1bc106a738a6ce4f7ed025d556bb44c1ede022e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfa71428
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfa71428
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfa71428

Branch: refs/heads/branch-3.1
Commit: dfa71428ea19835ba84d97f98ca78ec02790a209
Parents: 1c7d916
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:07:48 2018 -0700

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfa71428/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index a199d84..5607823 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,19 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

2018-07-18 Thread rkanter
Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via 
rkanter)

(cherry picked from commit 0838fe833738e04f5e6f6408e97866d77bebbf30)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c7d9163
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c7d9163
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c7d9163

Branch: refs/heads/branch-3.1
Commit: 1c7d916347d1c68ad32b592d764890b40b66e558
Parents: 27e2b4b
Author: Robert Kanter 
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:07:48 2018 -0700

--
 .../impl/container-executor.c   | 30 +++-
 .../test/test-container-executor.c  | 20 +
 2 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7d9163/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index baf0e8b..eff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2379,6 +2379,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+strerror(errno));
+return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+if (strcmp(entry->d_name, ".") == 0) {
+  continue;
+}
+if (strcmp(entry->d_name, "..") == 0) {
+  continue;
+}
+fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2413,7 +2435,13 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
 result = -1;
   } else {
 if (strstr(mount_path, "..") != NULL) {
-  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+  mount_path);
+  result = INVALID_COMMAND_PROVIDED;
+  goto cleanup;
+}
+if (!is_empty(mount_path)) {
+  fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
   result = INVALID_COMMAND_PROVIDED;
   goto cleanup;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7d9163/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 3d32883..a199d84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,6 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+printf("FAIL: / should not be empty\n");
+exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+exit(1);
+  }
+}
+
  // This test is expected to be executed either by a regular user or root

hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-07-12 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 556d9b36b -> 1bc106a73


YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bc106a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bc106a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bc106a7

Branch: refs/heads/trunk
Commit: 1bc106a738a6ce4f7ed025d556bb44c1ede022e3
Parents: 556d9b3
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Robert Kanter 
Committed: Thu Jul 12 16:38:46 2018 -0700

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc106a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index a199d84..5607823 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,19 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

2018-07-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk eecb5baaa -> 0838fe833


Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0838fe83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0838fe83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0838fe83

Branch: refs/heads/trunk
Commit: 0838fe833738e04f5e6f6408e97866d77bebbf30
Parents: eecb5ba
Author: Robert Kanter 
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Robert Kanter 
Committed: Mon Jul 9 10:37:20 2018 -0700

--
 .../impl/container-executor.c   | 30 +++-
 .../test/test-container-executor.c  | 20 +
 2 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0838fe83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index baf0e8b..eff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2379,6 +2379,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+strerror(errno));
+return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+if (strcmp(entry->d_name, ".") == 0) {
+  continue;
+}
+if (strcmp(entry->d_name, "..") == 0) {
+  continue;
+}
+fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2413,7 +2435,13 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
 result = -1;
   } else {
 if (strstr(mount_path, "..") != NULL) {
-  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+  mount_path);
+  result = INVALID_COMMAND_PROVIDED;
+  goto cleanup;
+}
+if (!is_empty(mount_path)) {
+  fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
   result = INVALID_COMMAND_PROVIDED;
   goto cleanup;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0838fe83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 3d32883..a199d84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,6 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+printf("FAIL: / should not be empty\n");
+exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+exit(1);
+  }
+}
+
  // This test is expected to be executed either by a regular user or root

[2/2] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

2018-07-05 Thread rkanter
YARN-7451. Add missing tests to verify the presence of custom resources of RM 
apps and scheduler webservice endpoints (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99febe7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99febe7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99febe7f

Branch: refs/heads/trunk
Commit: 99febe7fd50c31c0f5dd40fa7f376f2c1f64f8c3
Parents: 1726247
Author: Robert Kanter 
Authored: Thu Jul 5 10:54:19 2018 -0700
Committer: Robert Kanter 
Committed: Thu Jul 5 10:54:19 2018 -0700

--
 .../resourcemanager/webapp/dao/AppInfo.java |   2 +-
 .../webapp/dao/SchedulerInfo.java   |   8 +-
 .../fair/TestFairSchedulerConfiguration.java|   9 +-
 .../webapp/TestRMWebServices.java   |  31 ++-
 .../webapp/TestRMWebServicesApps.java   |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 +
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 +++
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ++
 .../FairSchedulerJsonVerifications.java | 139 ++
 .../FairSchedulerXmlVerifications.java  | 153 +++
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 +++
 .../webapp/helper/AppInfoJsonVerifications.java | 123 +
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 +
 .../webapp/helper/BufferedClientResponse.java   |  57 
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ++
 .../ResourceRequestsJsonVerifications.java  | 252 +
 .../ResourceRequestsXmlVerifications.java   | 215 +++
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 
 21 files changed, 2020 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index d47f13d..9d82bc7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -479,7 +479,7 @@ public class AppInfo {
   public int getNumNonAMContainersPreempted() {
 return numNonAMContainerPreempted;
   }
-  
+
   public int getNumAMContainersPreempted() {
 return numAMContainerPreempted;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 81491b1..163f707 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -41,8 +41,9 @@ public class SchedulerInfo {
   protected EnumSet schedulingResourceTypes;
   protected int maximumClusterPriority;
 
+  // JAXB needs this
   public SchedulerInfo() {
-  } // JAXB needs this
+  }
 
   public SchedulerInfo(final ResourceManager rm) {
 ResourceScheduler rs = rm.getResourceScheduler();
@@ -74,7 +75,10 @@ public class SchedulerInfo {
   }
 
   public String getSchedulerResourceTypes() {
-return Arrays.toString(minAllocResource.getResource

[1/2] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

2018-07-05 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 172624702 -> 99febe7fd


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
new file mode 100644
index 000..7c5b6db
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.w3c.dom.Element;
+import static 
org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlFloat;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * XML documents.
+ */
+public final class AppInfoXmlVerifications {
+
+  private AppInfoXmlVerifications() {
+//utility class
+  }
+
+  /**
+   * Tests whether {@link AppInfo} representation object contains the required
+   * values as per defined in the specified app parameter.
+   * @param info
+   * @param  app  an RMApp instance that contains the required values
+   */
+  public static void verify(Element info, RMApp app) {
+checkStringMatch("id", app.getApplicationId()
+.toString(), getXmlString(info, "id"));
+checkStringMatch("user", app.getUser(),
+getXmlString(info, "user"));
+checkStringMatch("name", app.getName(),
+getXmlString(info, "name"));
+checkStringMatch("applicationType",
+app.getApplicationType(), getXmlString(info, "applicationType"));
+checkStringMatch("queue", app.getQueue(),
+getXmlString(info, "queue"));
+assertEquals("priority doesn't match", 0, getXmlInt(info, "priority"));
+checkStringMatch("state", app.getState().toString(),
+getXmlString(info, "state"));
+checkStringMatch("finalStatus", app
+.getFinalApplicationStatus().toString(),
+getXmlString(info, "finalStatus"));
+assertEquals("progress doesn't match", 0, getXmlFloat(info, "progress"),
+0.0);
+if ("UNASSIGNED".equals(getXmlString(info, "trackingUI"))) {
+  checkStringMatch("trackingUI", "UNASSIGNED",
+  getXmlString(info, "trackingUI"));
+}
+WebServicesTestUtils.checkStringEqual("diagnostics",
+app.getDiagnostics().toString(), getXmlString(info, 
"diagnostics"));
+assertEquals("clusterId doesn't match",
+ResourceManager.getClusterTimeStamp(),
+getXmlLong(info, "clusterId"));
+assertEquals("startedTime doesn't match", app.getStartTime(),
+getXmlLong(info, "startedTime"));
+assertEquals("finishedTime doesn't match", app.getFinishTime(),
+getXmlLong(info, "finishedTime"));
+as

hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-06-07 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk d5eca1a6a -> 351cf87c9


Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/351cf87c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/351cf87c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/351cf87c

Branch: refs/heads/trunk
Commit: 351cf87c92872d90f62c476f85ae4d02e485769c
Parents: d5eca1a
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Robert Kanter 
Committed: Thu Jun 7 17:09:34 2018 -0700

--
 .../impl/container-executor.c   | 54 ++--
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md |  2 +-
 4 files changed, 55 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/351cf87c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1b8842a..baf0e8b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -73,6 +73,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", 
"mapred", "hdfs", "bin", 0}
 
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
 
 static const char* PROC_PATH = "/proc";
 
@@ -482,6 +483,12 @@ int is_tc_support_enabled() {
 DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
+int is_mount_cgroups_support_enabled() {
+return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+  DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED,
+  &executor_cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -2346,20 +2353,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
 while ((ep = readdir(dp)) != NULL) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, 
ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -2383,11 +2395,16 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   char *mount_path = malloc(len);
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
-  struct stat sb;
 
   if (controller == NULL || mount_path == NULL) {
 fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n");
 result = OUT_OF_MEMORY;
+goto cleanup;
+  }
+  if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) {
+fprintf(LOGFILE, "Unsupported cgroup hierarhy path detected.\n");
+result = INVALID_COMMAND_PROVIDED;
+goto cleanup;
   }
   if (get_kv_key(pair, controller, len) < 0 ||
   get_kv_value(pair, mount_path, len) < 0) {
@@ -2395,13 +2412,10 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   pair);
 result = -1;
   } else {
-if (stat(mount_path, &sb) != 0) {
-  // Create mount point, if it does not exist
-  const mode_t mount_perms = S_IRWXU | S_IRGRP

hadoop git commit: YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race condition (wilfreds and gphillips via rkanter)

2018-06-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 ca713e8c7 -> ea28c8db4


YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race 
condition (wilfreds and gphillips via rkanter)

(cherry picked from commit f97bd6bb7f7b4e7a99a572d9d9f9232702ea8318)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea28c8db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea28c8db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea28c8db

Branch: refs/heads/branch-2.9
Commit: ea28c8db40db69473c3f6d728831ada14135bcd5
Parents: ca713e8
Author: Robert Kanter 
Authored: Mon Jun 4 15:59:27 2018 -0700
Committer: Robert Kanter 
Committed: Mon Jun 4 16:34:39 2018 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 22 
 .../scheduler/fair/FairScheduler.java   |  2 +-
 .../scheduler/fifo/FifoScheduler.java   |  4 +-
 .../capacity/TestCapacityScheduler.java | 51 --
 .../scheduler/fair/TestFairScheduler.java   | 43 +++
 .../scheduler/fifo/TestFifoScheduler.java   | 55 +---
 6 files changed, 122 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea28c8db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index fadab6a..4f51e4e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1087,6 +1087,10 @@ public abstract class AbstractYarnScheduler
 // Process new container information
 List completedContainers = updateNewContainerInfo(nm);
 
+// NOTICE: it is possible to not find the NodeID as a node can be
+// decommissioned at the same time. Skip updates if node is null.
+SchedulerNode schedulerNode = getNode(nm.getNodeID());
+
 // Process completed containers
 Resource releasedResources = Resource.newInstance(0, 0);
 int releasedContainers = updateCompletedContainers(completedContainers,
@@ -1095,26 +1099,26 @@ public abstract class AbstractYarnScheduler
 // If the node is decommissioning, send an update to have the total
 // resource equal to the used resource, so no available resource to
 // schedule.
-// TODO YARN-5128: Fix possible race-condition when request comes in before
-// update is propagated
-if (nm.getState() == NodeState.DECOMMISSIONING) {
+if (nm.getState() == NodeState.DECOMMISSIONING && schedulerNode != null) {
   this.rmContext
   .getDispatcher()
   .getEventHandler()
   .handle(
   new RMNodeResourceUpdateEvent(nm.getNodeID(), ResourceOption
-  .newInstance(getSchedulerNode(nm.getNodeID())
-  .getAllocatedResource(), 0)));
+  .newInstance(schedulerNode.getAllocatedResource(), 0)));
 }
 
 updateSchedulerHealthInformation(releasedResources, releasedContainers);
-updateNodeResourceUtilization(nm);
+if (schedulerNode != null) {
+  updateNodeResourceUtilization(nm);
+}
 
 // Now node data structures are up-to-date and ready for scheduling.
 if(LOG.isDebugEnabled()) {
-  SchedulerNode node = getNode(nm.getNodeID());
-  LOG.debug("Node being looked for scheduling " + nm +
-  " availableResource: " + node.getUnallocatedResource());
+  LOG.debug(
+  "Node being looked for scheduling " + nm + " availableResource: " +
+  (schedulerNode == null ? "unknown (decomissioned)" :
+  schedulerNode.getUnallocatedResource()));
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea28c8db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/h

hadoop git commit: YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race condition (wilfreds and gphillips via rkanter)

2018-06-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0caf40efe -> f97bd6bb7


YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race 
condition (wilfreds and gphillips via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f97bd6bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f97bd6bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f97bd6bb

Branch: refs/heads/branch-2
Commit: f97bd6bb7f7b4e7a99a572d9d9f9232702ea8318
Parents: 0caf40e
Author: Robert Kanter 
Authored: Mon Jun 4 15:59:27 2018 -0700
Committer: Robert Kanter 
Committed: Mon Jun 4 15:59:27 2018 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 22 
 .../scheduler/fair/FairScheduler.java   |  2 +-
 .../scheduler/fifo/FifoScheduler.java   |  4 +-
 .../capacity/TestCapacityScheduler.java | 51 --
 .../scheduler/fair/TestFairScheduler.java   | 43 +++
 .../scheduler/fifo/TestFifoScheduler.java   | 55 +---
 6 files changed, 122 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f97bd6bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index fadab6a..4f51e4e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1087,6 +1087,10 @@ public abstract class AbstractYarnScheduler
 // Process new container information
 List completedContainers = updateNewContainerInfo(nm);
 
+// NOTICE: it is possible to not find the NodeID as a node can be
+// decommissioned at the same time. Skip updates if node is null.
+SchedulerNode schedulerNode = getNode(nm.getNodeID());
+
 // Process completed containers
 Resource releasedResources = Resource.newInstance(0, 0);
 int releasedContainers = updateCompletedContainers(completedContainers,
@@ -1095,26 +1099,26 @@ public abstract class AbstractYarnScheduler
 // If the node is decommissioning, send an update to have the total
 // resource equal to the used resource, so no available resource to
 // schedule.
-// TODO YARN-5128: Fix possible race-condition when request comes in before
-// update is propagated
-if (nm.getState() == NodeState.DECOMMISSIONING) {
+if (nm.getState() == NodeState.DECOMMISSIONING && schedulerNode != null) {
   this.rmContext
   .getDispatcher()
   .getEventHandler()
   .handle(
   new RMNodeResourceUpdateEvent(nm.getNodeID(), ResourceOption
-  .newInstance(getSchedulerNode(nm.getNodeID())
-  .getAllocatedResource(), 0)));
+  .newInstance(schedulerNode.getAllocatedResource(), 0)));
 }
 
 updateSchedulerHealthInformation(releasedResources, releasedContainers);
-updateNodeResourceUtilization(nm);
+if (schedulerNode != null) {
+  updateNodeResourceUtilization(nm);
+}
 
 // Now node data structures are up-to-date and ready for scheduling.
 if(LOG.isDebugEnabled()) {
-  SchedulerNode node = getNode(nm.getNodeID());
-  LOG.debug("Node being looked for scheduling " + nm +
-  " availableResource: " + node.getUnallocatedResource());
+  LOG.debug(
+  "Node being looked for scheduling " + nm + " availableResource: " +
+  (schedulerNode == null ? "unknown (decomissioned)" :
+  schedulerNode.getUnallocatedResource()));
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f97bd6bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-serve

hadoop git commit: YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race condition (wilfreds and gphillips via rkanter)

2018-06-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 9fe265f44 -> d5e6d0d5f


YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race 
condition (wilfreds and gphillips via rkanter)

(cherry picked from commit 0cd145a44390bc1a01113dce4be4e629637c3e8a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5e6d0d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5e6d0d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5e6d0d5

Branch: refs/heads/branch-3.0
Commit: d5e6d0d5f4fc8ca15b24d129f4fff9b1717888f9
Parents: 9fe265f
Author: Robert Kanter 
Authored: Mon Jun 4 15:32:03 2018 -0700
Committer: Robert Kanter 
Committed: Mon Jun 4 15:42:46 2018 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 19 +
 .../scheduler/fair/FairScheduler.java   |  2 +-
 .../scheduler/fifo/FifoScheduler.java   |  6 ++-
 .../capacity/TestCapacityScheduler.java | 44 ++--
 .../scheduler/fair/TestFairScheduler.java   | 43 ++-
 .../scheduler/fifo/TestFifoScheduler.java   | 44 +++-
 6 files changed, 115 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5e6d0d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 62098e6..7847573 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1088,12 +1088,16 @@ public abstract class AbstractYarnScheduler
 }
 
 // Process new container information
+// NOTICE: it is possible to not find the NodeID as a node can be
+// decommissioned at the same time. Skip updates if node is null.
 SchedulerNode schedulerNode = getNode(nm.getNodeID());
 List completedContainers = updateNewContainerInfo(nm,
 schedulerNode);
 
 // Notify Scheduler Node updated.
-schedulerNode.notifyNodeUpdate();
+if (schedulerNode != null) {
+  schedulerNode.notifyNodeUpdate();
+}
 
 // Process completed containers
 Resource releasedResources = Resource.newInstance(0, 0);
@@ -1103,9 +1107,7 @@ public abstract class AbstractYarnScheduler
 // If the node is decommissioning, send an update to have the total
 // resource equal to the used resource, so no available resource to
 // schedule.
-// TODO YARN-5128: Fix possible race-condition when request comes in before
-// update is propagated
-if (nm.getState() == NodeState.DECOMMISSIONING) {
+if (nm.getState() == NodeState.DECOMMISSIONING && schedulerNode != null) {
   this.rmContext
   .getDispatcher()
   .getEventHandler()
@@ -1115,13 +1117,16 @@ public abstract class AbstractYarnScheduler
 }
 
 updateSchedulerHealthInformation(releasedResources, releasedContainers);
-updateNodeResourceUtilization(nm, schedulerNode);
+if (schedulerNode != null) {
+  updateNodeResourceUtilization(nm, schedulerNode);
+}
 
 // Now node data structures are up-to-date and ready for scheduling.
 if(LOG.isDebugEnabled()) {
   LOG.debug(
-  "Node being looked for scheduling " + nm + " availableResource: "
-  + schedulerNode.getUnallocatedResource());
+  "Node being looked for scheduling " + nm + " availableResource: " +
+  (schedulerNode == null ? "unknown (decommissioned)" :
+  schedulerNode.getUnallocatedResource()));
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5e6d0d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java

hadoop git commit: YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race condition (wilfreds and gphillips via rkanter)

2018-06-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 10e35ed32 -> 93d6ed859


YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race 
condition (wilfreds and gphillips via rkanter)

(cherry picked from commit 0cd145a44390bc1a01113dce4be4e629637c3e8a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93d6ed85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93d6ed85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93d6ed85

Branch: refs/heads/branch-3.1
Commit: 93d6ed859e92dc132df2b3ae92fabbe5f958004d
Parents: 10e35ed
Author: Robert Kanter 
Authored: Mon Jun 4 15:32:03 2018 -0700
Committer: Robert Kanter 
Committed: Mon Jun 4 15:32:47 2018 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 19 +
 .../scheduler/fair/FairScheduler.java   |  2 +-
 .../scheduler/fifo/FifoScheduler.java   |  6 ++-
 .../capacity/TestCapacityScheduler.java | 44 ++--
 .../scheduler/fair/TestFairScheduler.java   | 43 ++-
 .../scheduler/fifo/TestFifoScheduler.java   | 44 +++-
 6 files changed, 115 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d6ed85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 18c7b4e..d2e81a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1106,12 +1106,16 @@ public abstract class AbstractYarnScheduler
 }
 
 // Process new container information
+// NOTICE: it is possible to not find the NodeID as a node can be
+// decommissioned at the same time. Skip updates if node is null.
 SchedulerNode schedulerNode = getNode(nm.getNodeID());
 List completedContainers = updateNewContainerInfo(nm,
 schedulerNode);
 
 // Notify Scheduler Node updated.
-schedulerNode.notifyNodeUpdate();
+if (schedulerNode != null) {
+  schedulerNode.notifyNodeUpdate();
+}
 
 // Process completed containers
 Resource releasedResources = Resource.newInstance(0, 0);
@@ -1121,9 +1125,7 @@ public abstract class AbstractYarnScheduler
 // If the node is decommissioning, send an update to have the total
 // resource equal to the used resource, so no available resource to
 // schedule.
-// TODO YARN-5128: Fix possible race-condition when request comes in before
-// update is propagated
-if (nm.getState() == NodeState.DECOMMISSIONING) {
+if (nm.getState() == NodeState.DECOMMISSIONING && schedulerNode != null) {
   this.rmContext
   .getDispatcher()
   .getEventHandler()
@@ -1133,13 +1135,16 @@ public abstract class AbstractYarnScheduler
 }
 
 updateSchedulerHealthInformation(releasedResources, releasedContainers);
-updateNodeResourceUtilization(nm, schedulerNode);
+if (schedulerNode != null) {
+  updateNodeResourceUtilization(nm, schedulerNode);
+}
 
 // Now node data structures are up-to-date and ready for scheduling.
 if(LOG.isDebugEnabled()) {
   LOG.debug(
-  "Node being looked for scheduling " + nm + " availableResource: "
-  + schedulerNode.getUnallocatedResource());
+  "Node being looked for scheduling " + nm + " availableResource: " +
+  (schedulerNode == null ? "unknown (decommissioned)" :
+  schedulerNode.getUnallocatedResource()));
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d6ed85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java

hadoop git commit: YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race condition (wilfreds and gphillips via rkanter)

2018-06-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 04cf699dd -> 0cd145a44


YARN-4677. RMNodeResourceUpdateEvent update from scheduler can lead to race 
condition (wilfreds and gphillips via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cd145a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cd145a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cd145a4

Branch: refs/heads/trunk
Commit: 0cd145a44390bc1a01113dce4be4e629637c3e8a
Parents: 04cf699
Author: Robert Kanter 
Authored: Mon Jun 4 15:32:03 2018 -0700
Committer: Robert Kanter 
Committed: Mon Jun 4 15:32:03 2018 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 19 +
 .../scheduler/fair/FairScheduler.java   |  2 +-
 .../scheduler/fifo/FifoScheduler.java   |  6 ++-
 .../capacity/TestCapacityScheduler.java | 44 ++--
 .../scheduler/fair/TestFairScheduler.java   | 43 ++-
 .../scheduler/fifo/TestFifoScheduler.java   | 44 +++-
 6 files changed, 115 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd145a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 18c7b4e..d2e81a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1106,12 +1106,16 @@ public abstract class AbstractYarnScheduler
 }
 
 // Process new container information
+// NOTICE: it is possible to not find the NodeID as a node can be
+// decommissioned at the same time. Skip updates if node is null.
 SchedulerNode schedulerNode = getNode(nm.getNodeID());
 List completedContainers = updateNewContainerInfo(nm,
 schedulerNode);
 
 // Notify Scheduler Node updated.
-schedulerNode.notifyNodeUpdate();
+if (schedulerNode != null) {
+  schedulerNode.notifyNodeUpdate();
+}
 
 // Process completed containers
 Resource releasedResources = Resource.newInstance(0, 0);
@@ -1121,9 +1125,7 @@ public abstract class AbstractYarnScheduler
 // If the node is decommissioning, send an update to have the total
 // resource equal to the used resource, so no available resource to
 // schedule.
-// TODO YARN-5128: Fix possible race-condition when request comes in before
-// update is propagated
-if (nm.getState() == NodeState.DECOMMISSIONING) {
+if (nm.getState() == NodeState.DECOMMISSIONING && schedulerNode != null) {
   this.rmContext
   .getDispatcher()
   .getEventHandler()
@@ -1133,13 +1135,16 @@ public abstract class AbstractYarnScheduler
 }
 
 updateSchedulerHealthInformation(releasedResources, releasedContainers);
-updateNodeResourceUtilization(nm, schedulerNode);
+if (schedulerNode != null) {
+  updateNodeResourceUtilization(nm, schedulerNode);
+}
 
 // Now node data structures are up-to-date and ready for scheduling.
 if(LOG.isDebugEnabled()) {
   LOG.debug(
-  "Node being looked for scheduling " + nm + " availableResource: "
-  + schedulerNode.getUnallocatedResource());
+  "Node being looked for scheduling " + nm + " availableResource: " +
+  (schedulerNode == null ? "unknown (decommissioned)" :
+  schedulerNode.getUnallocatedResource()));
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd145a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-y

hadoop git commit: HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets via rkanter)

2018-05-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk bc6d9d4c7 -> aa23d49fc


HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets 
via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa23d49f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa23d49f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa23d49f

Branch: refs/heads/trunk
Commit: aa23d49fc8b9c2537529dbdc13512000e2ab295a
Parents: bc6d9d4
Author: Robert Kanter 
Authored: Wed May 23 10:23:17 2018 -0700
Committer: Robert Kanter 
Committed: Wed May 23 10:24:09 2018 -0700

--
 .../org/apache/hadoop/http/HttpServer2.java | 79 +++-
 .../org/apache/hadoop/http/TestHttpServer.java  | 61 +++
 2 files changed, 121 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa23d49f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 47ca841..c273c78 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -34,6 +34,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -172,10 +174,16 @@ public final class HttpServer2 implements FilterContainer 
{
   private final SignerSecretProvider secretProvider;
   private XFrameOption xFrameOption;
   private boolean xFrameOptionIsEnabled;
-  private static final String X_FRAME_VALUE = "xFrameOption";
-  private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED";
-
-
+  public static final String HTTP_HEADER_PREFIX = "hadoop.http.header.";
+  private static final String HTTP_HEADER_REGEX =
+  "hadoop\\.http\\.header\\.([a-zA-Z\\-_]+)";
+  static final String X_XSS_PROTECTION  =
+  "X-XSS-Protection:1; mode=block";
+  static final String X_CONTENT_TYPE_OPTIONS =
+  "X-Content-Type-Options:nosniff";
+  private static final String X_FRAME_OPTIONS = "X-FRAME-OPTIONS";
+  private static final Pattern PATTERN_HTTP_HEADER_REGEX =
+  Pattern.compile(HTTP_HEADER_REGEX);
   /**
* Class to construct instances of HTTP server with specific options.
*/
@@ -574,10 +582,7 @@ public final class HttpServer2 implements FilterContainer {
 addDefaultApps(contexts, appDir, conf);
 webServer.setHandler(handlers);
 
-Map xFrameParams = new HashMap<>();
-xFrameParams.put(X_FRAME_ENABLED,
-String.valueOf(this.xFrameOptionIsEnabled));
-xFrameParams.put(X_FRAME_VALUE,  this.xFrameOption.toString());
+Map xFrameParams = setHeaders(conf);
 addGlobalFilter("safety", QuotingInputFilter.class.getName(), 
xFrameParams);
 final FilterInitializer[] initializers = getFilterInitializers(conf);
 if (initializers != null) {
@@ -1475,9 +1480,11 @@ public final class HttpServer2 implements 
FilterContainer {
   public static class QuotingInputFilter implements Filter {
 
 private FilterConfig config;
+private Map headerMap;
 
 public static class RequestQuoter extends HttpServletRequestWrapper {
   private final HttpServletRequest rawRequest;
+
   public RequestQuoter(HttpServletRequest rawRequest) {
 super(rawRequest);
 this.rawRequest = rawRequest;
@@ -1566,6 +1573,7 @@ public final class HttpServer2 implements FilterContainer 
{
 @Override
 public void init(FilterConfig config) throws ServletException {
   this.config = config;
+  initHttpHeaderMap();
 }
 
 @Override
@@ -1593,11 +1601,7 @@ public final class HttpServer2 implements 
FilterContainer {
   } else if (mime.startsWith("application/xml")) {
 httpResponse.setContentType("text/xml; charset=utf-8");
   }
-
-  if(Boolean.valueOf(this.config.getInitParameter(X_FRAME_ENABLED))) {
-httpResponse.addHeader("X-FRAME-OPTIONS",
-this.config.getInitParameter(X_FRAME_VALUE));
-  }
+  headerMap.forEach((k, v) -> httpResponse.addHeader(k, v));
   chain.doFilter(quoted, httpResponse);
 }
 
@@ -1613,14 +1617,25 @@ public final class HttpServer2 implements 
FilterContainer {
   return (mime == null) ? null : mime;
 }
 
+

hadoop git commit: YARN-8273. Log aggregation does not warn if HDFS quota in target directory is exceeded (grepas via rkanter)

2018-05-22 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 83f53e5c6 -> b22f56c47


YARN-8273. Log aggregation does not warn if HDFS quota in target directory is 
exceeded (grepas via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b22f56c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b22f56c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b22f56c4

Branch: refs/heads/trunk
Commit: b22f56c4719e63bd4f6edc2a075e0bcdb9442255
Parents: 83f53e5
Author: Robert Kanter 
Authored: Tue May 22 14:24:38 2018 -0700
Committer: Robert Kanter 
Committed: Tue May 22 14:24:38 2018 -0700

--
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |  4 ++
 .../logaggregation/AggregatedLogFormat.java | 14 +++-
 .../LogAggregationDFSException.java | 45 
 .../LogAggregationFileController.java   |  4 +-
 .../tfile/LogAggregationTFileController.java| 13 +++-
 .../logaggregation/TestContainerLogsUtils.java  |  4 +-
 .../logaggregation/AppLogAggregatorImpl.java| 49 ++---
 .../TestAppLogAggregatorImpl.java   | 75 +---
 .../nodemanager/webapp/TestNMWebServices.java   |  7 +-
 9 files changed, 183 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index db6c11a..a25c524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -40,6 +40,10 @@
   hadoop-common
   provided
 
+
+  org.apache.hadoop
+  hadoop-hdfs-client
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index af3066e..81d5053 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.io.Writable;
@@ -547,7 +548,7 @@ public class AggregatedLogFormat {
 }
 
 @Override
-public void close() {
+public void close() throws DSQuotaExceededException {
   try {
 if (writer != null) {
   writer.close();
@@ -555,7 +556,16 @@ public class AggregatedLogFormat {
   } catch (Exception e) {
 LOG.warn("Exception closing writer", e);
   } finally {
-IOUtils.cleanupWithLogger(LOG, this.fsDataOStream);
+try {
+  this.fsDataOStream.close();
+} catch (DSQuotaExceededException e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+  throw e;
+} catch (Throwable e) {
+  LOG.error("Exception in closing {}",
+  this.fsDataOStream.getClass(), e);
+}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b22f56c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
new file mode 100644
index 000..19953e4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationDFSException.java
@@ -0,0 +1,45 @@
+/**
+* Licensed to the Apache Software

[2/2] hadoop git commit: YARN-8202. DefaultAMSProcessor should properly check units of requested custom resource types against minimum/maximum allocation (snemeth via rkanter)

2018-05-10 Thread rkanter
YARN-8202. DefaultAMSProcessor should properly check units of requested custom 
resource types against minimum/maximum allocation (snemeth via rkanter)

(cherry picked from commit c8b53c43644b4ad22d5385c22cad8ed573c0b1ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0506c762
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0506c762
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0506c762

Branch: refs/heads/branch-3.1
Commit: 0506c762b2f96ccb2c12cce8fd4659536236354e
Parents: f4f2912
Author: Robert Kanter 
Authored: Thu May 10 09:31:59 2018 -0700
Committer: Robert Kanter 
Committed: Thu May 10 09:52:11 2018 -0700

--
 .../v2/app/rm/ContainerRequestCreator.java  |  57 ++
 .../v2/app/rm/TestRMContainerAllocator.java | 534 ++-
 .../hadoop/yarn/util/UnitsConversionUtil.java   |  44 +-
 .../resourcetypes/ResourceTypesTestHelper.java  |  93 
 .../hadoop/yarn/server/utils/BuilderUtils.java  |   8 +-
 .../scheduler/SchedulerUtils.java   |  95 +++-
 .../TestApplicationMasterService.java   | 185 +--
 .../scheduler/TestSchedulerUtils.java   | 278 +-
 8 files changed, 961 insertions(+), 333 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0506c762/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
new file mode 100644
index 000..39a9ddc
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+final class ContainerRequestCreator {
+
+  private ContainerRequestCreator() {}
+
+  static ContainerRequestEvent createRequest(JobId jobId, int taskAttemptId,
+  Resource resource, String[] hosts) {
+return createRequest(jobId, taskAttemptId, resource, hosts,
+false, false);
+  }
+
+  static ContainerRequestEvent createRequest(JobId jobId, int taskAttemptId,
+  Resource resource, String[] hosts, boolean earlierFailedAttempt,
+  boolean reduce) {
+final TaskId taskId;
+if (reduce) {
+  taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+} else {
+  taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+}
+TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
+taskAttemptId);
+
+if (earlierFailedAttempt) {
+  return ContainerRequestEvent
+  .createContainerRequestEventForFailedContainer(attemptId,
+  resource);
+}
+return new ContainerRequestEvent(attemptId, resource, hosts,
+new String[]{NetworkTopology.DEFAULT_RACK});
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0506c762/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapr

[1/2] hadoop git commit: YARN-8202. DefaultAMSProcessor should properly check units of requested custom resource types against minimum/maximum allocation (snemeth via rkanter)

2018-05-10 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f4f291282 -> 0506c762b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0506c762/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 90e4be8..9696741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -22,9 +22,13 @@ import static java.lang.Thread.sleep;
 import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB;
 import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES;
 
+
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -61,6 +65,7 @@ import 
org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
+import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import 
org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -75,6 +80,9 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
+.FairSchedulerConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
@@ -365,7 +373,7 @@ public class TestApplicationMasterService {
   am2.addContainerToBeReleased(cId);
   try {
 am2.schedule();
-Assert.fail("Exception was expected!!");
+fail("Exception was expected!!");
   } catch (InvalidContainerReleaseException e) {
 StringBuilder sb = new StringBuilder("Cannot release container : ");
 sb.append(cId.toString());
@@ -460,7 +468,7 @@ public class TestApplicationMasterService {
   FinalApplicationStatus.FAILED, "", "");
   try {
 am1.unregisterAppAttempt(req, false);
-Assert.fail("ApplicationMasterNotRegisteredException should be 
thrown");
+fail("ApplicationMasterNotRegisteredException should be thrown");
   } catch (ApplicationMasterNotRegisteredException e) {
 Assert.assertNotNull(e);
 Assert.assertNotNull(e.getMessage());
@@ -468,7 +476,7 @@ public class TestApplicationMasterService {
 "Application Master is trying to unregister before registering 
for:"
 ));
   } catch (Exception e) {
-Assert.fail("ApplicationMasterNotRegisteredException should be 
thrown");
+fail("ApplicationMasterNotRegisteredException should be thrown");
   }
 
   am1.registerAppAttempt();
@@ -627,9 +635,7 @@ public class TestApplicationMasterService {
   Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
   response.getUpdateErrors().get(0).getReason());
 } finally {
-  if (rm != null) {
-rm.close();
-  }
+  rm.close();
 }
   }
 
@@ -709,34 +715,48 @@ public class TestApplicationMasterService {
 
 ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
 
-CapacitySchedulerConfiguration csconf =
-new CapacitySchedulerConfiguration();
-csconf.setResourceComparator(DominantResourceCalculator.class);
+final YarnConfiguration yarnConf;
+if (schedulerCls.getCanonicalName()
+.equals(CapacityScheduler.class.getCanonicalName())) {
+  CapacitySchedulerConf

[1/2] hadoop git commit: YARN-8202. DefaultAMSProcessor should properly check units of requested custom resource types against minimum/maximum allocation (snemeth via rkanter)

2018-05-10 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk ba051b068 -> c8b53c436


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8b53c43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 90e4be8..9696741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -22,9 +22,13 @@ import static java.lang.Thread.sleep;
 import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB;
 import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES;
 
+
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -61,6 +65,7 @@ import 
org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
+import org.apache.hadoop.yarn.resourcetypes.ResourceTypesTestHelper;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import 
org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -75,6 +80,9 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
+.FairSchedulerConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
@@ -365,7 +373,7 @@ public class TestApplicationMasterService {
   am2.addContainerToBeReleased(cId);
   try {
 am2.schedule();
-Assert.fail("Exception was expected!!");
+fail("Exception was expected!!");
   } catch (InvalidContainerReleaseException e) {
 StringBuilder sb = new StringBuilder("Cannot release container : ");
 sb.append(cId.toString());
@@ -460,7 +468,7 @@ public class TestApplicationMasterService {
   FinalApplicationStatus.FAILED, "", "");
   try {
 am1.unregisterAppAttempt(req, false);
-Assert.fail("ApplicationMasterNotRegisteredException should be 
thrown");
+fail("ApplicationMasterNotRegisteredException should be thrown");
   } catch (ApplicationMasterNotRegisteredException e) {
 Assert.assertNotNull(e);
 Assert.assertNotNull(e.getMessage());
@@ -468,7 +476,7 @@ public class TestApplicationMasterService {
 "Application Master is trying to unregister before registering 
for:"
 ));
   } catch (Exception e) {
-Assert.fail("ApplicationMasterNotRegisteredException should be 
thrown");
+fail("ApplicationMasterNotRegisteredException should be thrown");
   }
 
   am1.registerAppAttempt();
@@ -627,9 +635,7 @@ public class TestApplicationMasterService {
   Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
   response.getUpdateErrors().get(0).getReason());
 } finally {
-  if (rm != null) {
-rm.close();
-  }
+  rm.close();
 }
   }
 
@@ -709,34 +715,48 @@ public class TestApplicationMasterService {
 
 ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
 
-CapacitySchedulerConfiguration csconf =
-new CapacitySchedulerConfiguration();
-csconf.setResourceComparator(DominantResourceCalculator.class);
+final YarnConfiguration yarnConf;
+if (schedulerCls.getCanonicalName()
+.equals(CapacityScheduler.class.getCanonicalName())) {
+  CapacitySchedulerConfigura

[2/2] hadoop git commit: YARN-8202. DefaultAMSProcessor should properly check units of requested custom resource types against minimum/maximum allocation (snemeth via rkanter)

2018-05-10 Thread rkanter
YARN-8202. DefaultAMSProcessor should properly check units of requested custom 
resource types against minimum/maximum allocation (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8b53c43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8b53c43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8b53c43

Branch: refs/heads/trunk
Commit: c8b53c43644b4ad22d5385c22cad8ed573c0b1ba
Parents: ba051b0
Author: Robert Kanter 
Authored: Thu May 10 09:31:59 2018 -0700
Committer: Robert Kanter 
Committed: Thu May 10 09:31:59 2018 -0700

--
 .../v2/app/rm/ContainerRequestCreator.java  |  57 ++
 .../v2/app/rm/TestRMContainerAllocator.java | 534 ++-
 .../hadoop/yarn/util/UnitsConversionUtil.java   |  44 +-
 .../resourcetypes/ResourceTypesTestHelper.java  |  93 
 .../hadoop/yarn/server/utils/BuilderUtils.java  |   8 +-
 .../scheduler/SchedulerUtils.java   |  95 +++-
 .../TestApplicationMasterService.java   | 185 +--
 .../scheduler/TestSchedulerUtils.java   | 278 +-
 8 files changed, 961 insertions(+), 333 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8b53c43/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
new file mode 100644
index 000..39a9ddc
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestCreator.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.rm;
+
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+final class ContainerRequestCreator {
+
+  private ContainerRequestCreator() {}
+
+  static ContainerRequestEvent createRequest(JobId jobId, int taskAttemptId,
+  Resource resource, String[] hosts) {
+return createRequest(jobId, taskAttemptId, resource, hosts,
+false, false);
+  }
+
+  static ContainerRequestEvent createRequest(JobId jobId, int taskAttemptId,
+  Resource resource, String[] hosts, boolean earlierFailedAttempt,
+  boolean reduce) {
+final TaskId taskId;
+if (reduce) {
+  taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+} else {
+  taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+}
+TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
+taskAttemptId);
+
+if (earlierFailedAttempt) {
+  return ContainerRequestEvent
+  .createContainerRequestEventForFailedContainer(attemptId,
+  resource);
+}
+return new ContainerRequestEvent(attemptId, resource, hosts,
+new String[]{NetworkTopology.DEFAULT_RACK});
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8b53c43/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/ja

hadoop git commit: MAPREDUCE-7072. mapred job -history prints duplicate counter in human output (wilfreds via rkanter)

2018-04-27 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 600f4d402 -> b47275fe2


MAPREDUCE-7072. mapred job -history prints duplicate counter in human output 
(wilfreds via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b47275fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b47275fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b47275fe

Branch: refs/heads/branch-2
Commit: b47275fe28f51d14adf777822b45c0812e71a2a6
Parents: 600f4d4
Author: Robert Kanter 
Authored: Fri Apr 27 09:57:31 2018 -0700
Committer: Robert Kanter 
Committed: Fri Apr 27 09:57:31 2018 -0700

--
 .../HumanReadableHistoryViewerPrinter.java  |  3 +-
 .../jobhistory/JSONHistoryViewerPrinter.java|  3 +-
 .../jobhistory/TestHistoryViewerPrinter.java| 76 
 3 files changed, 80 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b47275fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
index d3da9f4..fdf3c47 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
@@ -148,7 +148,8 @@ class HumanReadableHistoryViewerPrinter implements 
HistoryViewerPrinter {
   "Total Value"));
   buff.append("\n--" +
   "-");
-  for (String groupName : totalCounters.getGroupNames()) {
+  for (CounterGroup counterGroup : totalCounters) {
+String groupName = counterGroup.getName();
 CounterGroup totalGroup = totalCounters.getGroup(groupName);
 CounterGroup mapGroup = mapCounters.getGroup(groupName);
 CounterGroup reduceGroup = reduceCounters.getGroup(groupName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b47275fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
index 456dcf7..850fe2f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
@@ -104,7 +104,8 @@ class JSONHistoryViewerPrinter implements 
HistoryViewerPrinter {
 // Killed jobs might not have counters
 if (totalCounters != null) {
   JSONObject jGroups = new JSONObject();
-  for (String groupName : totalCounters.getGroupNames()) {
+  for (CounterGroup counterGroup : totalCounters) {
+String groupName = counterGroup.getName();
 CounterGroup totalGroup = totalCounters.getGroup(groupName);
 CounterGroup mapGroup = mapCounters.getGroup(groupName);
 CounterGroup reduceGroup = reduceCounters.getGroup(groupName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b47275fe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
ind

hadoop git commit: MAPREDUCE-7072. mapred job -history prints duplicate counter in human output (wilfreds via rkanter)

2018-04-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 56788d759 -> 1b9ecc264


MAPREDUCE-7072. mapred job -history prints duplicate counter in human output 
(wilfreds via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b9ecc26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b9ecc26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b9ecc26

Branch: refs/heads/trunk
Commit: 1b9ecc264a6abe9d9d5412318c67d3d2936bd9ac
Parents: 56788d7
Author: Robert Kanter 
Authored: Tue Apr 24 11:30:38 2018 -0700
Committer: Robert Kanter 
Committed: Tue Apr 24 11:30:38 2018 -0700

--
 .../HumanReadableHistoryViewerPrinter.java  |  3 +-
 .../jobhistory/JSONHistoryViewerPrinter.java|  3 +-
 .../jobhistory/TestHistoryViewerPrinter.java| 76 
 3 files changed, 80 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b9ecc26/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
index 685fa05..060ba24 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
@@ -148,7 +148,8 @@ class HumanReadableHistoryViewerPrinter implements 
HistoryViewerPrinter {
   "Total Value"));
   buff.append("\n--" +
   "-");
-  for (String groupName : totalCounters.getGroupNames()) {
+  for (CounterGroup counterGroup : totalCounters) {
+String groupName = counterGroup.getName();
 CounterGroup totalGroup = totalCounters.getGroup(groupName);
 CounterGroup mapGroup = mapCounters.getGroup(groupName);
 CounterGroup reduceGroup = reduceCounters.getGroup(groupName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b9ecc26/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
index cfb6641..5f8e9ad 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
@@ -104,7 +104,8 @@ class JSONHistoryViewerPrinter implements 
HistoryViewerPrinter {
 // Killed jobs might not have counters
 if (totalCounters != null) {
   JSONObject jGroups = new JSONObject();
-  for (String groupName : totalCounters.getGroupNames()) {
+  for (CounterGroup counterGroup : totalCounters) {
+String groupName = counterGroup.getName();
 CounterGroup totalGroup = totalCounters.getGroup(groupName);
 CounterGroup mapGroup = mapCounters.getGroup(groupName);
 CounterGroup reduceGroup = reduceCounters.getGroup(groupName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b9ecc26/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
index 5885

hadoop git commit: HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew KMS tokens (xiaochen via rkanter)

2018-04-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 0539b7234 -> 74e5b4b43


HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew 
KMS tokens (xiaochen via rkanter)

(cherry picked from commit 7ab08a9c37a76edbe02d556fcfb2e637f45afc21)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74e5b4b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74e5b4b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74e5b4b4

Branch: refs/heads/branch-2.9
Commit: 74e5b4b437a0005676d51e770f7dd5110893e356
Parents: 0539b72
Author: Robert Kanter 
Authored: Mon Apr 23 15:44:15 2018 -0700
Committer: Robert Kanter 
Committed: Mon Apr 23 15:57:44 2018 -0700

--
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java   | 12 ++--
 .../security/DelegationTokenRenewer.java |  4 
 .../security/TestDelegationTokenRenewer.java | 19 +++
 3 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e5b4b4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
index 908ad39..1fff2f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
@@ -58,9 +58,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot renew token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return 0;
+throw new IOException(String
+.format("keyProvider %s cannot renew token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).renewDelegationToken(token);
@@ -78,9 +78,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot cancel token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return;
+throw new IOException(String
+.format("keyProvider %s cannot cancel token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).cancelDelegationToken(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e5b4b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index abb8d59..220787c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -592,6 +592,10 @@ public class DelegationTokenRenewer extends 
AbstractService {
   throws IOException {
 // calculate timer time
 long expiresIn = token.expirationDate - System.currentTimeMillis();
+if (expiresIn <= 0) {
+  LOG.info("Will not renew token " + token);
+  return;
+}
 long renewIn = token.expirationDate - expiresIn/10; // little bit before 
the expiration
 // need to create new task every time
 RenewalTimerTask tTask = new RenewalTimerTask(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74e5b4b4/hadoop-yarn-pr

hadoop git commit: HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew KMS tokens (xiaochen via rkanter)

2018-04-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 463fcfb50 -> 101d9005d


HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew 
KMS tokens (xiaochen via rkanter)

(cherry picked from commit 7ab08a9c37a76edbe02d556fcfb2e637f45afc21)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/101d9005
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/101d9005
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/101d9005

Branch: refs/heads/branch-3.0
Commit: 101d9005dd07faeaf18f6e7c69c54d645cd214a9
Parents: 463fcfb
Author: Robert Kanter 
Authored: Mon Apr 23 15:44:15 2018 -0700
Committer: Robert Kanter 
Committed: Mon Apr 23 15:53:53 2018 -0700

--
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java   | 12 ++--
 .../security/DelegationTokenRenewer.java |  4 
 .../security/TestDelegationTokenRenewer.java | 19 +++
 3 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/101d9005/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
index 908ad39..1fff2f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
@@ -58,9 +58,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot renew token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return 0;
+throw new IOException(String
+.format("keyProvider %s cannot renew token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).renewDelegationToken(token);
@@ -78,9 +78,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot cancel token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return;
+throw new IOException(String
+.format("keyProvider %s cannot cancel token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).cancelDelegationToken(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/101d9005/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index abb8d59..220787c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -592,6 +592,10 @@ public class DelegationTokenRenewer extends 
AbstractService {
   throws IOException {
 // calculate timer time
 long expiresIn = token.expirationDate - System.currentTimeMillis();
+if (expiresIn <= 0) {
+  LOG.info("Will not renew token " + token);
+  return;
+}
 long renewIn = token.expirationDate - expiresIn/10; // little bit before 
the expiration
 // need to create new task every time
 RenewalTimerTask tTask = new RenewalTimerTask(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/101d9005/hadoop-yarn-pr

hadoop git commit: HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew KMS tokens (xiaochen via rkanter)

2018-04-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 caf4d5000 -> ea7ad5049


HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew 
KMS tokens (xiaochen via rkanter)

(cherry picked from commit 7ab08a9c37a76edbe02d556fcfb2e637f45afc21)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea7ad504
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea7ad504
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea7ad504

Branch: refs/heads/branch-3.1
Commit: ea7ad50499db98d687a3bbdebcbda201fae20b55
Parents: caf4d50
Author: Robert Kanter 
Authored: Mon Apr 23 15:44:15 2018 -0700
Committer: Robert Kanter 
Committed: Mon Apr 23 16:25:41 2018 -0700

--
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java   | 12 ++--
 .../security/DelegationTokenRenewer.java |  4 
 .../security/TestDelegationTokenRenewer.java | 19 +++
 3 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea7ad504/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
index 908ad39..1fff2f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
@@ -58,9 +58,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot renew token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return 0;
+throw new IOException(String
+.format("keyProvider %s cannot renew token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).renewDelegationToken(token);
@@ -78,9 +78,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot cancel token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return;
+throw new IOException(String
+.format("keyProvider %s cannot cancel token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).cancelDelegationToken(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea7ad504/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index abb8d59..220787c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -592,6 +592,10 @@ public class DelegationTokenRenewer extends 
AbstractService {
   throws IOException {
 // calculate timer time
 long expiresIn = token.expirationDate - System.currentTimeMillis();
+if (expiresIn <= 0) {
+  LOG.info("Will not renew token " + token);
+  return;
+}
 long renewIn = token.expirationDate - expiresIn/10; // little bit before 
the expiration
 // need to create new task every time
 RenewalTimerTask tTask = new RenewalTimerTask(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea7ad504/hadoop-yarn-pr

hadoop git commit: HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew KMS tokens (xiaochen via rkanter)

2018-04-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 989a3929a -> 7ab08a9c3


HADOOP-15390. Yarn RM logs flooded by DelegationTokenRenewer trying to renew 
KMS tokens (xiaochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ab08a9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ab08a9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ab08a9c

Branch: refs/heads/trunk
Commit: 7ab08a9c37a76edbe02d556fcfb2e637f45afc21
Parents: 989a392
Author: Robert Kanter 
Authored: Mon Apr 23 15:44:15 2018 -0700
Committer: Robert Kanter 
Committed: Mon Apr 23 15:44:15 2018 -0700

--
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java   | 12 ++--
 .../security/DelegationTokenRenewer.java |  4 
 .../security/TestDelegationTokenRenewer.java | 19 +++
 3 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ab08a9c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
index 908ad39..1fff2f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSTokenRenewer.java
@@ -58,9 +58,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot renew token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return 0;
+throw new IOException(String
+.format("keyProvider %s cannot renew token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).renewDelegationToken(token);
@@ -78,9 +78,9 @@ public class KMSTokenRenewer extends TokenRenewer {
 try {
   if (!(keyProvider instanceof
   KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-LOG.warn("keyProvider {} cannot cancel token {}.",
-keyProvider == null ? "null" : keyProvider.getClass(), token);
-return;
+throw new IOException(String
+.format("keyProvider %s cannot cancel token [%s]",
+keyProvider == null ? "null" : keyProvider.getClass(), token));
   }
   ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
   keyProvider).cancelDelegationToken(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ab08a9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index abb8d59..220787c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -592,6 +592,10 @@ public class DelegationTokenRenewer extends 
AbstractService {
   throws IOException {
 // calculate timer time
 long expiresIn = token.expirationDate - System.currentTimeMillis();
+if (expiresIn <= 0) {
+  LOG.info("Will not renew token " + token);
+  return;
+}
 long renewIn = token.expirationDate - expiresIn/10; // little bit before 
the expiration
 // need to create new task every time
 RenewalTimerTask tTask = new RenewalTimerTask(token);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ab08a9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/

hadoop git commit: MAPREDUCE-6441. Improve temporary directory name generation in LocalDistributedCacheManager for concurrent processes (wattsinabox, rchiang, haibochen via rkanter)

2018-03-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk fcea5a4d7 -> edb202e49


MAPREDUCE-6441. Improve temporary directory name generation in 
LocalDistributedCacheManager for concurrent processes (wattsinabox, rchiang, 
haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edb202e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edb202e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edb202e4

Branch: refs/heads/trunk
Commit: edb202e4934be750e43103c047752b97c5eafc94
Parents: fcea5a4
Author: Robert Kanter 
Authored: Mon Mar 26 14:55:53 2018 -0700
Committer: Robert Kanter 
Committed: Mon Mar 26 14:55:53 2018 -0700

--
 .../mapred/LocalDistributedCacheManager.java| 13 ++-
 .../apache/hadoop/mapred/LocalJobRunner.java|  2 +-
 .../TestLocalDistributedCacheManager.java   | 95 +++-
 3 files changed, 78 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edb202e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
index 2a14ec3..bcf73d1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -37,7 +37,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.UUID;
 
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileUtil;
@@ -82,7 +82,7 @@ class LocalDistributedCacheManager {
* @param conf
* @throws IOException
*/
-  public void setup(JobConf conf) throws IOException {
+  public void setup(JobConf conf, JobID jobId) throws IOException {
 File workDir = new File(System.getProperty("user.dir"));
 
 // Generate YARN local resources objects corresponding to the distributed
@@ -91,9 +91,7 @@ class LocalDistributedCacheManager {
   new LinkedHashMap();
 MRApps.setupDistributedCache(conf, localResources);
 // Generating unique numbers for FSDownload.
-AtomicLong uniqueNumberGenerator =
-new AtomicLong(System.currentTimeMillis());
-
+
 // Find which resources are to be put on the local classpath
 Map classpaths = new HashMap();
 Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
@@ -124,9 +122,10 @@ class LocalDistributedCacheManager {
   Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
   Map> resourcesToPaths = Maps.newHashMap();
   for (LocalResource resource : localResources.values()) {
+Path destPathForDownload = new Path(destPath,
+jobId.toString() + "_" + UUID.randomUUID().toString());
 Callable download =
-new FSDownload(localFSFileContext, ugi, conf, new Path(destPath,
-Long.toString(uniqueNumberGenerator.incrementAndGet())),
+new FSDownload(localFSFileContext, ugi, conf, destPathForDownload,
 resource);
 Future future = exec.submit(download);
 resourcesToPaths.put(resource, future);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/edb202e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
index 5e7a250..2ab4e76 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
@@ -169,7 +169,7 @@ public class Loca

hadoop git commit: HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)

2018-02-20 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 49ed7d7fc -> 14f5797ef


HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)

(cherry picked from commit 324e5a7cf2bdb6f93e7c6fd9023817528f243dcf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14f5797e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14f5797e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14f5797e

Branch: refs/heads/branch-2
Commit: 14f5797eff375c89d5cd7e013441514c35e7ac86
Parents: 49ed7d7
Author: Robert Kanter 
Authored: Tue Feb 20 17:24:37 2018 -0800
Committer: Robert Kanter 
Committed: Tue Feb 20 17:25:12 2018 -0800

--
 .../security/authentication/util/Signer.java| 22 +---
 1 file changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14f5797e/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index aa63e40..e7b19a4 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -14,8 +14,11 @@
 package org.apache.hadoop.security.authentication.util;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.codec.binary.StringUtils;
 
-import java.nio.charset.Charset;
+import javax.crypto.Mac;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.InvalidKeyException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 
@@ -24,6 +27,7 @@ import java.security.NoSuchAlgorithmException;
  */
 public class Signer {
   private static final String SIGNATURE = "&s=";
+  private static final String SIGNING_ALGORITHM = "HmacSHA256";
 
   private SignerSecretProvider secretProvider;
 
@@ -86,25 +90,27 @@ public class Signer {
*/
   protected String computeSignature(byte[] secret, String str) {
 try {
-  MessageDigest md = MessageDigest.getInstance("SHA");
-  md.update(str.getBytes(Charset.forName("UTF-8")));
-  md.update(secret);
-  byte[] digest = md.digest();
-  return new Base64(0).encodeToString(digest);
-} catch (NoSuchAlgorithmException ex) {
+  SecretKeySpec key = new SecretKeySpec((secret), SIGNING_ALGORITHM);
+  Mac mac = Mac.getInstance(SIGNING_ALGORITHM);
+  mac.init(key);
+  byte[] sig = mac.doFinal(StringUtils.getBytesUtf8(str));
+  return new Base64(0).encodeToString(sig);
+} catch (NoSuchAlgorithmException | InvalidKeyException ex) {
   throw new RuntimeException("It should not happen, " + ex.getMessage(), 
ex);
 }
   }
 
   protected void checkSignatures(String rawValue, String originalSignature)
   throws SignerException {
+byte[] orginalSignatureBytes = StringUtils.getBytesUtf8(originalSignature);
 boolean isValid = false;
 byte[][] secrets = secretProvider.getAllSecrets();
 for (int i = 0; i < secrets.length; i++) {
   byte[] secret = secrets[i];
   if (secret != null) {
 String currentSignature = computeSignature(secret, rawValue);
-if (originalSignature.equals(currentSignature)) {
+if (MessageDigest.isEqual(orginalSignatureBytes,
+StringUtils.getBytesUtf8(currentSignature))) {
   isValid = true;
   break;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)

2018-02-20 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 84cea0011 -> 324e5a7cf


HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/324e5a7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/324e5a7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/324e5a7c

Branch: refs/heads/trunk
Commit: 324e5a7cf2bdb6f93e7c6fd9023817528f243dcf
Parents: 84cea00
Author: Robert Kanter 
Authored: Tue Feb 20 17:24:37 2018 -0800
Committer: Robert Kanter 
Committed: Tue Feb 20 17:24:37 2018 -0800

--
 .../security/authentication/util/Signer.java| 22 +---
 1 file changed, 14 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/324e5a7c/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index aa63e40..e7b19a4 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -14,8 +14,11 @@
 package org.apache.hadoop.security.authentication.util;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.codec.binary.StringUtils;
 
-import java.nio.charset.Charset;
+import javax.crypto.Mac;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.InvalidKeyException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 
@@ -24,6 +27,7 @@ import java.security.NoSuchAlgorithmException;
  */
 public class Signer {
   private static final String SIGNATURE = "&s=";
+  private static final String SIGNING_ALGORITHM = "HmacSHA256";
 
   private SignerSecretProvider secretProvider;
 
@@ -86,25 +90,27 @@ public class Signer {
*/
   protected String computeSignature(byte[] secret, String str) {
 try {
-  MessageDigest md = MessageDigest.getInstance("SHA");
-  md.update(str.getBytes(Charset.forName("UTF-8")));
-  md.update(secret);
-  byte[] digest = md.digest();
-  return new Base64(0).encodeToString(digest);
-} catch (NoSuchAlgorithmException ex) {
+  SecretKeySpec key = new SecretKeySpec((secret), SIGNING_ALGORITHM);
+  Mac mac = Mac.getInstance(SIGNING_ALGORITHM);
+  mac.init(key);
+  byte[] sig = mac.doFinal(StringUtils.getBytesUtf8(str));
+  return new Base64(0).encodeToString(sig);
+} catch (NoSuchAlgorithmException | InvalidKeyException ex) {
   throw new RuntimeException("It should not happen, " + ex.getMessage(), 
ex);
 }
   }
 
   protected void checkSignatures(String rawValue, String originalSignature)
   throws SignerException {
+byte[] orginalSignatureBytes = StringUtils.getBytesUtf8(originalSignature);
 boolean isValid = false;
 byte[][] secrets = secretProvider.getAllSecrets();
 for (int i = 0; i < secrets.length; i++) {
   byte[] secret = secrets[i];
   if (secret != null) {
 String currentSignature = computeSignature(secret, rawValue);
-if (originalSignature.equals(currentSignature)) {
+if (MessageDigest.isEqual(orginalSignatureBytes,
+StringUtils.getBytesUtf8(currentSignature))) {
   isValid = true;
   break;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: YARN-7841. Cleanup AllocationFileLoaderService's reloadAllocations method (snemeth via rkanter)

2018-02-06 Thread rkanter
YARN-7841. Cleanup AllocationFileLoaderService's reloadAllocations method 
(snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/814d701d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/814d701d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/814d701d

Branch: refs/heads/trunk
Commit: 814d701d46b4ff87f6ec94ba39667c80475c38d7
Parents: 4304fcd
Author: Robert Kanter 
Authored: Tue Feb 6 14:36:49 2018 -0800
Committer: Robert Kanter 
Committed: Tue Feb 6 14:36:49 2018 -0800

--
 .../scheduler/fair/AllocationConfiguration.java |  99 ++--
 .../fair/AllocationFileLoaderService.java   | 481 ---
 .../fair/allocation/AllocationFileParser.java   | 258 ++
 .../allocation/AllocationFileQueueParser.java   | 268 +++
 .../fair/allocation/QueueProperties.java| 280 +++
 .../fair/TestAllocationFileLoaderService.java   | 187 ---
 .../allocationfile/AllocationFileQueue.java |  82 
 .../AllocationFileQueueBuilder.java | 115 +
 .../AllocationFileQueueProperties.java  | 202 
 .../AllocationFileSimpleQueueBuilder.java   |  64 +++
 .../AllocationFileSubQueueBuilder.java  |  54 +++
 .../allocationfile/AllocationFileWriter.java| 175 +++
 .../fair/allocationfile/UserSettings.java   |  80 +++
 13 files changed, 1798 insertions(+), 547 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/814d701d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index 3505bca..c98aadc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.security.AccessType;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocation.AllocationFileParser;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocation.QueueProperties;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -47,7 +49,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   private final Map maxChildQueueResources;
   // Sharing weights for each queue
   private final Map queueWeights;
-  
+
   // Max concurrent running applications for each queue and for each user; in 
addition,
   // for users that have no max specified, we use the userMaxJobsDefault.
   @VisibleForTesting
@@ -88,13 +90,13 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   private final Set reservableQueues;
 
   private final Map schedulingPolicies;
-  
+
   private final SchedulingPolicy defaultSchedulingPolicy;
-  
+
   // Policy for mapping apps to queues
   @VisibleForTesting
   QueuePlacementPolicy placementPolicy;
-  
+
   //Configured queues in the alloc xml
   @VisibleForTesting
   Map> configuredQueues;
@@ -104,53 +106,42 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 
   private final Set nonPreemptableQueues;
 
-  public AllocationConfiguration(Map minQueueResources,
-  Map maxQueueResources,
-  Map maxChildQueueResources,
-  Map queueMaxApps,
-  Map userMaxApps,
-  Map queueWeights,
-  Map queueMaxAMShares, int userMaxAppsDefault,
-  int queueMaxAppsDefault,
-  ConfigurableResource queueMaxResourcesDefault,
-  float queueMaxAMShareDefault,
-  Map schedulingPolicies,
-  SchedulingPolicy defaultSchedulingPolicy,
-  Map minSharePreemptionTimeouts,
-  Map fairSharePreemptionTimeouts,
-  Map fairSharePreemptionThresholds,
-  Map> queueAcls,
-  M

[1/2] hadoop git commit: YARN-7841. Cleanup AllocationFileLoaderService's reloadAllocations method (snemeth via rkanter)

2018-02-06 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4304fcd5b -> 814d701d4


http://git-wip-us.apache.org/repos/asf/hadoop/blob/814d701d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
index c46ecd9..4a7461d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
@@ -27,12 +27,12 @@ import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile.AllocationFileWriter;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
 import org.apache.hadoop.yarn.util.ControlledClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Test;
-
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -43,7 +43,6 @@ import java.net.URISyntaxException;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.util.List;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -51,7 +50,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class TestAllocationFileLoaderService {
-  
+
   final static String TEST_DIR = new File(System.getProperty("test.build.data",
   "/tmp")).getAbsolutePath();
 
@@ -112,7 +111,7 @@ public class TestAllocationFileLoaderService {
   fail("Unable to access allocation file from classpath: " + e);
 }
   }
-  
+
   @Test (timeout = 1)
   public void testReload() throws Exception {
 PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -127,7 +126,7 @@ public class TestAllocationFileLoaderService {
 out.println("  ");
 out.println("");
 out.close();
-
+
 ControlledClock clock = new ControlledClock();
 clock.setTime(0);
 Configuration conf = new Configuration();
@@ -141,7 +140,7 @@ public class TestAllocationFileLoaderService {
 allocLoader.setReloadListener(confHolder);
 allocLoader.reloadAllocations();
 AllocationConfiguration allocConf = confHolder.allocConf;
-
+
 // Verify conf
 QueuePlacementPolicy policy = allocConf.getPlacementPolicy();
 List rules = policy.getRules();
@@ -154,9 +153,9 @@ public class TestAllocationFileLoaderService {
 .contains("root.queueA"));
 assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF)
 .contains("root.queueB"));
-
+
 confHolder.allocConf = null;
-
+
 // Modify file and advance the clock
 out = new PrintWriter(new FileWriter(ALLOC_FILE));
 out.println("");
@@ -166,22 +165,22 @@ public class TestAllocationFileLoaderService {
 out.println("  ");
 out.println("  ");
 out.println("");
-out.println("");  
+out.println("");
 out.println(" ");
 out.println("");
 out.println("");
 out.println("  ");
 out.println("");
 out.close();
-
+
 clock.tickMsec(System.currentTimeMillis()
 + AllocationFileLoaderService.ALLOC_RELOAD_WAIT_MS + 1);
 allocLoader.start();
-
+
 while (confHolder.allocConf == null) {
   Thread.sleep(20);
 }
-
+
 // Verify conf
 allocConf = confHolder.allocConf;
 policy = allocConf.getPlacementPolicy();
@@ -199,91 +198,89 @@ public class TestAllocationFileLoaderService {
 assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF)
 .contains("root.queueB"));
   }
-  
+
   @Test
   public void testAllocationFileParsing() throws Exception {
 Configuration conf = new Configuration();
 conf.set(FairSchedulerConfig

hadoop git commit: MAPREDUCE-6995. Uploader tool for Distributed Cache Deploy documentation (miklos.szeg...@cloudera.com via rkanter)

2018-01-19 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 62c9e7fa9 -> 836643d79


MAPREDUCE-6995. Uploader tool for Distributed Cache Deploy documentation 
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/836643d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/836643d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/836643d7

Branch: refs/heads/trunk
Commit: 836643d793c68bf1bee883abece84f024591da7c
Parents: 62c9e7f
Author: Robert Kanter 
Authored: Fri Jan 19 17:55:24 2018 -0800
Committer: Robert Kanter 
Committed: Fri Jan 19 17:57:54 2018 -0800

--
 .../site/markdown/DistributedCacheDeploy.md.vm  | 61 ++---
 .../src/site/markdown/MapredCommands.md | 19 ++
 .../mapred/uploader/FrameworkUploader.java  | 48 +
 .../mapred/uploader/TestFrameworkUploader.java  | 72 
 4 files changed, 178 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/836643d7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
index c69be1c..4552235 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
@@ -12,10 +12,6 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-#set ( $H3 = '###' )
-#set ( $H4 = '' )
-#set ( $H5 = '#' )
-
 Hadoop: Distributed Cache Deploy
 
 
@@ -55,23 +51,41 @@ Deploying a new MapReduce version consists of three steps:
 1.  Upload the MapReduce archive to a location that can be accessed by the
 job submission client. Ideally the archive should be on the cluster's 
default
 filesystem at a publicly-readable path. See the archive location discussion
-below for more details.
+below for more details. You can use the framework uploader tool to perform
+this step like
+`mapred frameworkuploader -target
+
hdfs:///mapred/framework/hadoop-mapreduce-${project.version}.tar#mrframework`.
+It will select the jar files that are in the classpath and put them into
+a tar archive specified by the -target and -fs options. The tool then 
returns
+a suggestion of how to set `mapreduce.application.framework.path` and
+`mapreduce.application.classpath`.
+
+`-fs`: The target file system. Defaults to the default filesystem set by
+`fs.defaultFS`.
+
+`-target` is the target location of the framework tarball, optionally 
followed
+ by a # with the localized alias. It then uploads the tar to the specified
+ directory. gzip is not needed since the jar files are already compressed.
+ Make sure the target directory is readable by all users but it is not
+ writable by others than administrators to protect cluster security.
 
 2.  Configure `mapreduce.application.framework.path` to point to the
 location where the archive is located. As when specifying distributed cache
 files for a job, this is a URL that also supports creating an alias for the
 archive if a URL fragment is specified. For example,
-
`hdfs:/mapred/framework/hadoop-mapreduce-${project.version}.tar.gz#mrframework`
+
`hdfs:///mapred/framework/hadoop-mapreduce-${project.version}.tar.gz#mrframework`
 will be localized as `mrframework` rather than
 `hadoop-mapreduce-${project.version}.tar.gz`.
 
 3.  Configure `mapreduce.application.classpath` to set the proper
-classpath to use with the MapReduce archive configured above. NOTE: An 
error
+classpath to use with the MapReduce archive configured above.
+If the `frameworkuploader` tool is used, it uploads all dependencies
+and returns the value that needs to be configured here. NOTE: An error
 occurs if `mapreduce.application.framework.path` is configured but
 `mapreduce.application.classpath` does not reference the base name of the
 archive path or the alias if an alias was specified.
 
-$H3 Location of the MapReduce Archive and How It Affects Job Performance
+### Location of the MapReduce Archive and How It Affects Job Performance
 
 Note that the location of the MapReduce archive can be critical to job 
submission and job startup p

hadoop git commit: MAPREDUCE-7032. Add the ability to specify a delayed replication count (miklos.szeg...@cloudera.com via rkanter)

2018-01-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ac109909 -> d716084f4


MAPREDUCE-7032. Add the ability to specify a delayed replication count 
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d716084f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d716084f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d716084f

Branch: refs/heads/trunk
Commit: d716084f4503bf826ef10424d7025ea1ff4ee104
Parents: 5ac1099
Author: Robert Kanter 
Authored: Tue Jan 16 10:45:45 2018 -0800
Committer: Robert Kanter 
Committed: Tue Jan 16 10:45:45 2018 -0800

--
 .../mapred/uploader/FrameworkUploader.java  | 124 +--
 .../mapred/uploader/TestFrameworkUploader.java  |  21 +++-
 2 files changed, 128 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d716084f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
index 899689d..ee482d7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
@@ -25,6 +25,8 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.compress.archivers.ArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -43,6 +45,8 @@ import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.NotLinkException;
 import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -73,7 +77,15 @@ public class FrameworkUploader implements Runnable {
   @VisibleForTesting
   String target = null;
   @VisibleForTesting
-  short replication = 10;
+  Path targetPath = null;
+  @VisibleForTesting
+  short initialReplication = 3;
+  @VisibleForTesting
+  short finalReplication = 10;
+  @VisibleForTesting
+  short acceptableReplication = 9;
+  @VisibleForTesting
+  int timeout = 10;
   private boolean ignoreSymlink = false;
 
   @VisibleForTesting
@@ -101,9 +113,10 @@ public class FrameworkUploader implements Runnable {
   LOG.info(
   "Suggested mapreduce.application.classpath $PWD/" + alias + "/*");
   System.out.println("Suggested classpath $PWD/" + alias + "/*");
-} catch (UploaderException|IOException e) {
+} catch (UploaderException|IOException|InterruptedException e) {
   LOG.error("Error in execution " + e.getMessage());
   e.printStackTrace();
+  throw new RuntimeException(e);
 }
   }
 
@@ -147,7 +160,7 @@ public class FrameworkUploader implements Runnable {
 if (targetStream == null) {
   validateTargetPath();
   int lastIndex = target.indexOf('#');
-  Path targetPath =
+  targetPath =
   new Path(
   target.substring(
   0, lastIndex == -1 ? target.length() : lastIndex));
@@ -160,7 +173,7 @@ public class FrameworkUploader implements Runnable {
   targetStream = null;
   if (fileSystem instanceof DistributedFileSystem) {
 LOG.info("Set replication to " +
-replication + " for path: " + targetPath);
+initialReplication + " for path: " + targetPath);
 LOG.info("Disabling Erasure Coding for path: " + targetPath);
 DistributedFileSystem dfs = (DistributedFileSystem)fileSystem;
 DistributedFileSystem.HdfsDataOutputStreamBuilder builder =
@@ -168,13 +181,13 @@ public class FrameworkUploader implements Runnable {
 .overwrite(true)
 .ecPolicyName(
 SystemErasureCodingPolicies.getReplicationPolicy().getName());
-if (replication > 0) {
-  builder.replication(replication);
+if (initialReplication &

hadoop git commit: YARN-7479. TestContainerManagerSecurity.testContainerManager[Simple] flaky in trunk (ajisakaa via rkanter)

2018-01-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d03115d02 -> e54c65a32


YARN-7479. TestContainerManagerSecurity.testContainerManager[Simple] flaky in 
trunk (ajisakaa via rkanter)

(cherry picked from commit 5ac109909a29fab30363b752b5215be7f5dc616b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e54c65a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e54c65a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e54c65a3

Branch: refs/heads/branch-3.0
Commit: e54c65a32663b717d8c91abc259f0a41c55e66da
Parents: d03115d
Author: Robert Kanter 
Authored: Tue Jan 16 10:16:42 2018 -0800
Committer: Robert Kanter 
Committed: Tue Jan 16 10:21:12 2018 -0800

--
 .../server/TestContainerManagerSecurity.java | 19 +++
 1 file changed, 7 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e54c65a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index ad2f68a..77a021c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -30,12 +30,10 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 
-import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -52,7 +50,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -409,9 +406,10 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   }
 
   private void waitForContainerToFinishOnNM(ContainerId containerId)
-  throws TimeoutException, InterruptedException {
+  throws InterruptedException {
 Context nmContext = yarnCluster.getNodeManager(0).getNMContext();
-int interval = 4 * 60; // Max time for container token to expire.
+// Max time for container token to expire.
+final int timeout = 4 * 60 * 1000;
 
 // If the container is null, then it has already completed and been removed
 // from the Context by asynchronous calls.
@@ -420,14 +418,11 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   try {
 LOG.info("Waiting for " + containerId + " to get to state " +
 ContainerState.COMPLETE);
-GenericTestUtils.waitFor(new Supplier() {
-  @Override
-  public Boolean get() {
-return ContainerState.COMPLETE.equals(
-waitContainer.cloneAndGetContainerStatus().getState());
-  }
-}, 10, interval);
+GenericTestUtils.waitFor(() -> ContainerState.COMPLETE.equals(
+waitContainer.cloneAndGetContainerStatus().getState()),
+500, timeout);
   } catch (TimeoutException te) {
+LOG.error("TimeoutException", te);
 fail("Was waiting for " + containerId + " to get to state " +
 ContainerState.COMPLETE + " but was in state " +
 waitContainer.cloneAndGetContainerStatus().getState() +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7479. TestContainerManagerSecurity.testContainerManager[Simple] flaky in trunk (ajisakaa via rkanter)

2018-01-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk b20293535 -> 5ac109909


YARN-7479. TestContainerManagerSecurity.testContainerManager[Simple] flaky in 
trunk (ajisakaa via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ac10990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ac10990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ac10990

Branch: refs/heads/trunk
Commit: 5ac109909a29fab30363b752b5215be7f5dc616b
Parents: b202935
Author: Robert Kanter 
Authored: Tue Jan 16 10:16:42 2018 -0800
Committer: Robert Kanter 
Committed: Tue Jan 16 10:16:42 2018 -0800

--
 .../server/TestContainerManagerSecurity.java | 19 +++
 1 file changed, 7 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ac10990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index ad2f68a..77a021c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -30,12 +30,10 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 
-import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -52,7 +50,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -409,9 +406,10 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   }
 
   private void waitForContainerToFinishOnNM(ContainerId containerId)
-  throws TimeoutException, InterruptedException {
+  throws InterruptedException {
 Context nmContext = yarnCluster.getNodeManager(0).getNMContext();
-int interval = 4 * 60; // Max time for container token to expire.
+// Max time for container token to expire.
+final int timeout = 4 * 60 * 1000;
 
 // If the container is null, then it has already completed and been removed
 // from the Context by asynchronous calls.
@@ -420,14 +418,11 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   try {
 LOG.info("Waiting for " + containerId + " to get to state " +
 ContainerState.COMPLETE);
-GenericTestUtils.waitFor(new Supplier() {
-  @Override
-  public Boolean get() {
-return ContainerState.COMPLETE.equals(
-waitContainer.cloneAndGetContainerStatus().getState());
-  }
-}, 10, interval);
+GenericTestUtils.waitFor(() -> ContainerState.COMPLETE.equals(
+waitContainer.cloneAndGetContainerStatus().getState()),
+500, timeout);
   } catch (TimeoutException te) {
+LOG.error("TimeoutException", te);
 fail("Was waiting for " + containerId + " to get to state " +
 ContainerState.COMPLETE + " but was in state " +
 waitContainer.cloneAndGetContainerStatus().getState() +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-7030. Uploader tool should ignore symlinks to the same directory (miklos.szeg...@cloudera.com via rkanter)

2018-01-12 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk c05b84703 -> e404650f4


MAPREDUCE-7030. Uploader tool should ignore symlinks to the same directory 
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e404650f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e404650f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e404650f

Branch: refs/heads/trunk
Commit: e404650f489727d2df9a8813fddc4e0d682fbbee
Parents: c05b847
Author: Robert Kanter 
Authored: Fri Jan 12 14:18:01 2018 -0800
Committer: Robert Kanter 
Committed: Fri Jan 12 14:18:01 2018 -0800

--
 .../mapred/uploader/FrameworkUploader.java  | 50 +-
 .../mapred/uploader/TestFrameworkUploader.java  | 53 
 2 files changed, 101 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e404650f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
index a374262..899689d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
@@ -40,6 +40,9 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.NotLinkException;
+import java.nio.file.Paths;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -71,6 +74,7 @@ public class FrameworkUploader implements Runnable {
   String target = null;
   @VisibleForTesting
   short replication = 10;
+  private boolean ignoreSymlink = false;
 
   @VisibleForTesting
   Set filteredInputFiles = new HashSet<>();
@@ -79,8 +83,7 @@ public class FrameworkUploader implements Runnable {
   @VisibleForTesting
   List blacklistedFiles = new LinkedList<>();
 
-  @VisibleForTesting
-  OutputStream targetStream = null;
+  private OutputStream targetStream = null;
   private String alias = null;
 
   private void printHelp(Options options) {
@@ -284,6 +287,9 @@ public class FrameworkUploader implements Runnable {
 break;
   }
 }
+if (ignoreSymlink && !excluded) {
+  excluded = checkSymlink(jar);
+}
 if (found && !excluded) {
   LOG.info("Whitelisted " + jar.getAbsolutePath());
   if (!filteredInputFiles.add(jar.getAbsolutePath())) {
@@ -299,6 +305,40 @@ public class FrameworkUploader implements Runnable {
 }
   }
 
+  /**
+   * Check if the file is a symlink to the same directory.
+   * @param jar The file to check
+   * @return true, to ignore the directory
+   */
+  @VisibleForTesting
+  boolean checkSymlink(File jar) {
+if (Files.isSymbolicLink(jar.toPath())) {
+  try {
+java.nio.file.Path link = Files.readSymbolicLink(jar.toPath());
+java.nio.file.Path jarPath = Paths.get(jar.getAbsolutePath());
+String linkString = link.toString();
+java.nio.file.Path jarParent = jarPath.getParent();
+java.nio.file.Path linkPath =
+jarParent == null ? null : jarParent.resolve(linkString);
+java.nio.file.Path linkPathParent =
+linkPath == null ? null : linkPath.getParent();
+java.nio.file.Path normalizedLinkPath =
+linkPathParent == null ? null : linkPathParent.normalize();
+if (normalizedLinkPath != null && jarParent.equals(
+normalizedLinkPath)) {
+  LOG.info(String.format("Ignoring same directory link %s to %s",
+  jarPath.toString(), link.toString()));
+  return true;
+}
+  } catch (NotLinkException ex) {
+LOG.debug("Not a link", jar);
+  } catch (IOException ex) {
+LOG.warn("Cannot read symbolic link on", jar);
+  }
+}
+return false;
+  }
+
   private void validateTargetPath() throws UploaderException {
 if (!target.startsWith("hdfs:/") &&
 !target.startsWith("file:/")) {
@@ -340,6 +380,9 @@ public class FrameworkUploader implements Runnable {
 .withDescriptio

hadoop git commit: YARN-7622. Allow fair-scheduler configuration on HDFS (gphillips via rkanter)

2018-01-10 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 679ad2ced -> ff67c68d3


YARN-7622. Allow fair-scheduler configuration on HDFS (gphillips via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff67c68d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff67c68d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff67c68d

Branch: refs/heads/branch-2
Commit: ff67c68d3c29e29ace96da09f18e18eba1f1
Parents: 679ad2c
Author: Robert Kanter 
Authored: Wed Jan 10 15:03:33 2018 -0800
Committer: Robert Kanter 
Committed: Wed Jan 10 15:03:33 2018 -0800

--
 .../fair/AllocationFileLoaderService.java   | 118 +++
 .../fair/TestAllocationFileLoaderService.java   |  92 ---
 2 files changed, 143 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff67c68d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 3f409e4..d64ee55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -17,25 +17,16 @@
 */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.CharMatcher;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -46,8 +37,8 @@ import org.apache.hadoop.yarn.security.Permission;
 import org.apache.hadoop.yarn.security.PrivilegedEntity;
 import org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -58,8 +49,17 @@ import org.w3c.dom.NodeList;
 import org.w3c.dom.Text;
 import org.xml.sax.SAXException;
 
-import com.google.common.base.CharMatcher;
-import com.google.common.annotations.VisibleForTesting;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.IOException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 @Public
 @Unstable
@@ -79,6 +79,9 @@ public class AllocationFileLoaderService extends 
AbstractService {
 
   public static final long THREAD_JOIN_TIMEOUT_MS = 1000;
 
+  //Permitted allocation file filesystems (case insensitive)
+  private static final String SUPPORTED_FS_REGEX =
+  "(?i)(hdfs)|(file)|(s3a)|(viewfs)";
   private static final String ROOT = "root";
   private static final AccessCo

hadoop git commit: YARN-7645. TestContainerResourceUsage#testUsageAfterAMRestartWithMultipleContainers is flakey with FairScheduler (rkanter)

2018-01-05 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk f8e7dd9b1 -> 2aa4f0a55


YARN-7645. 
TestContainerResourceUsage#testUsageAfterAMRestartWithMultipleContainers is 
flakey with FairScheduler (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2aa4f0a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2aa4f0a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2aa4f0a5

Branch: refs/heads/trunk
Commit: 2aa4f0a55936239d35babd84da2a0d1a261bc9bd
Parents: f8e7dd9
Author: Robert Kanter 
Authored: Fri Jan 5 13:55:09 2018 -0800
Committer: Robert Kanter 
Committed: Fri Jan 5 13:55:09 2018 -0800

--
 .../java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa4f0a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 19ca6d7..302f5b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -1199,6 +1199,7 @@ public class MockRM extends ResourceManager {
 nm.nodeHeartbeat(true);
 ((AbstractYarnScheduler)rm.getResourceScheduler()).update();
 rm.drainEventsImplicitly();
+nm.nodeHeartbeat(true);
 MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
 rm.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.LAUNCHED);
 return am;
@@ -1215,6 +1216,7 @@ public class MockRM extends ResourceManager {
 nm.nodeHeartbeat(true);
 ((AbstractYarnScheduler)rm.getResourceScheduler()).update();
 rm.drainEventsImplicitly();
+nm.nodeHeartbeat(true);
 MockAM am = new MockAM(rm.getRMContext(), rm.masterService,
 attempt.getAppAttemptId());
 rm.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.LAUNCHED);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7557. It should be possible to specify resource types in the fair scheduler increment value (grepas via rkanter)

2018-01-05 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 83b513ac6 -> f8e7dd9b1


YARN-7557. It should be possible to specify resource types in the fair 
scheduler increment value (grepas via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8e7dd9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8e7dd9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8e7dd9b

Branch: refs/heads/trunk
Commit: f8e7dd9b10f0b1b9d80e6196eb2b0296b523d8f4
Parents: 83b513a
Author: Robert Kanter 
Authored: Fri Jan 5 11:15:06 2018 -0800
Committer: Robert Kanter 
Committed: Fri Jan 5 11:15:06 2018 -0800

--
 .../yarn/util/resource/ResourceUtils.java   |   2 +-
 .../fair/FairSchedulerConfiguration.java|  94 +-
 .../fair/TestFairSchedulerConfiguration.java| 289 +++
 .../src/site/markdown/FairScheduler.md  |   7 +-
 4 files changed, 379 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e7dd9b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 17567e8..b352752 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -59,7 +59,7 @@ public class ResourceUtils {
 
   private static final String MEMORY = ResourceInformation.MEMORY_MB.getName();
   private static final String VCORES = ResourceInformation.VCORES.getName();
-  private static final Pattern RESOURCE_REQUEST_VALUE_PATTERN =
+  public static final Pattern RESOURCE_REQUEST_VALUE_PATTERN =
   Pattern.compile("^([0-9]+) ?([a-zA-Z]*)$");
 
   private static final Pattern RESOURCE_NAME_PATTERN = Pattern.compile(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e7dd9b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 38e71a7..90e487b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -17,6 +17,10 @@
 */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import static 
org.apache.hadoop.yarn.util.resource.ResourceUtils.RESOURCE_REQUEST_VALUE_PATTERN;
+
+import java.util.HashMap;
+import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -27,8 +31,10 @@ import 
org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
@@ -38,12 +44,18 @@ public class FairSchedulerConfiguration extends 
Configuration {
 
   public static final Log LOG = LogFactory.getLog(
   FairSchedulerConfiguration.class.getName());
-  
-  /** Increment request grant-able by the RM scheduler. 
-   * These properties are looked up in the yarn-site.xml  */
+
+  /** Increment request grant-able by the RM scheduler.
+   * These properties are looked up in the yarn-site.xml.
+   * Kept for backward-compatibility - the new preferred way to configure the
+   * increment is the yarn.r

[3/3] hadoop git commit: HADOOP-14246. Authentication Tokens should use SecureRandom instead of Random and 256 bit secrets (Contributed by Robert Kanter via Daniel Templeton)

2018-01-04 Thread rkanter
HADOOP-14246. Authentication Tokens should use SecureRandom instead of Random 
and 256 bit secrets
(Contributed by Robert Kanter via Daniel Templeton)

(cherry picked from commit 4dd6206547de8f694532579e37ba8103bafaeb12)
(cherry picked from commit f20aa38a1de73dd4a0b3a5b30636e8af246cd36a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88d951e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88d951e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88d951e3

Branch: refs/heads/branch-2.7
Commit: 88d951e30bb34d9a6e1e2a181419a7fcc88ebfd7
Parents: 9e5fffa
Author: Daniel Templeton 
Authored: Wed Apr 12 11:17:31 2017 -0700
Committer: Robert Kanter 
Committed: Thu Jan 4 15:44:56 2018 -0800

--
 .../util/RandomSignerSecretProvider.java|   9 +-
 .../util/ZKSignerSecretProvider.java|  10 +-
 .../util/TestRandomSignerSecretProvider.java|  68 ++--
 .../util/TestZKSignerSecretProvider.java| 154 ---
 hadoop-common-project/hadoop-common/CHANGES.txt |   4 +
 5 files changed, 209 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d951e3/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
index 41059a7..9245887 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
@@ -15,8 +15,9 @@ package org.apache.hadoop.security.authentication.util;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import java.nio.charset.Charset;
+import java.security.SecureRandom;
 import java.util.Random;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -32,7 +33,7 @@ public class RandomSignerSecretProvider extends 
RolloverSignerSecretProvider {
 
   public RandomSignerSecretProvider() {
 super();
-rand = new Random();
+rand = new SecureRandom();
   }
 
   /**
@@ -48,6 +49,8 @@ public class RandomSignerSecretProvider extends 
RolloverSignerSecretProvider {
 
   @Override
   protected byte[] generateNewSecret() {
-return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
+byte[] secret = new byte[32]; // 32 bytes = 256 bits
+rand.nextBytes(secret);
+return secret;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d951e3/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
index 11bfccd..91a2efd 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
@@ -16,6 +16,7 @@ package org.apache.hadoop.security.authentication.util;
 import com.google.common.annotations.VisibleForTesting;
 import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
+import java.security.SecureRandom;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -176,7 +177,7 @@ public class ZKSignerSecretProvider extends 
RolloverSignerSecretProvider {
 
   public ZKSignerSecretProvider() {
 super();
-rand = new Random();
+rand = new SecureRandom();
   }
 
   /**
@@ -369,8 +370,11 @@ public class ZKSignerSecretProvider extends 
RolloverSignerSecretProvider {
 }
   }
 
-  private byte[] generateRandomSecret() {
-return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
+  @VisibleForTesting
+  protected byte[] generateRandomSecret() {
+byte[] secret = new byte[32]; // 32 bytes = 256 bits
+rand.nextBytes(secret);
+return secret;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d951e3/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretPr

[2/3] hadoop git commit: HADOOP-12611. TestZKSignerSecretProvider#testMultipleInit occasionally fail (ebadger via rkanter)

2018-01-04 Thread rkanter
HADOOP-12611. TestZKSignerSecretProvider#testMultipleInit occasionally fail 
(ebadger via rkanter)

(cherry picked from commit c183b9de8d072a35dcde96a20b1550981f886e86)
(cherry picked from commit 7bfa595679a037c6956117ec266c7b2e62b48863)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e5fffa6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e5fffa6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e5fffa6

Branch: refs/heads/branch-2.7
Commit: 9e5fffa65f591986eb6a305faa5109ea4668d55d
Parents: e03cee7
Author: Robert Kanter 
Authored: Fri Oct 7 09:33:24 2016 -0700
Committer: Robert Kanter 
Committed: Thu Jan 4 15:42:05 2018 -0800

--
 .../util/RolloverSignerSecretProvider.java  |   2 +-
 .../util/TestZKSignerSecretProvider.java| 223 +--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 3 files changed, 104 insertions(+), 124 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e5fffa6/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
index bdca3e4..8ce4b23 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
 public abstract class RolloverSignerSecretProvider
 extends SignerSecretProvider {
 
-  private static Logger LOG = LoggerFactory.getLogger(
+  static Logger LOG = LoggerFactory.getLogger(
 RolloverSignerSecretProvider.class);
   /**
* Stores the currently valid secrets.  The current secret is the 0th element

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e5fffa6/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index 4f8b5ae..5e640bb 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -17,7 +17,12 @@ import java.util.Arrays;
 import java.util.Properties;
 import java.util.Random;
 import javax.servlet.ServletContext;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.curator.test.TestingServer;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -25,7 +30,6 @@ import org.junit.Test;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -34,8 +38,13 @@ public class TestZKSignerSecretProvider {
   private TestingServer zkServer;
 
   // rollover every 2 sec
-  private final int timeout = 4000;
-  private final long rolloverFrequency = Long.valueOf(timeout / 2);
+  private final int timeout = 100;
+  private final long rolloverFrequency = timeout / 2;
+
+  static final Log LOG = LogFactory.getLog(TestZKSignerSecretProvider.class);
+  {
+LogManager.getLogger( RolloverSignerSecretProvider.LOG.getName() 
).setLevel(Level.DEBUG);
+  }
 
   @Before
   public void setup() throws Exception {
@@ -60,8 +69,8 @@ public class TestZKSignerSecretProvider {
 byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
 byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
 byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
-ZKSignerSecretProvider secretProvider =
-spy(new ZKSignerSecretProvider(seed));
+MockZKSignerSecretProvider secretProvider =
+spy(new MockZKSignerSecretProvider(seed));
 Properties config = new Properties();
 config.setProperty

[1/3] hadoop git commit: HADOOP-12181. Fix intermittent test failure of TestZKSignerSecretProvider. Contributed by Masatake Iwasaki.

2018-01-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 1c798e700 -> 88d951e30


HADOOP-12181. Fix intermittent test failure of TestZKSignerSecretProvider. 
Contributed by Masatake Iwasaki.

(cherry picked from commit def7490b29dddca39674b5ec31a6067deed98396)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e03cee73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e03cee73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e03cee73

Branch: refs/heads/branch-2.7
Commit: e03cee7300b62fc8ff06e7d10e21927a3f40b717
Parents: 1c798e7
Author: Haohui Mai 
Authored: Sun Nov 22 16:56:15 2015 -0800
Committer: Robert Kanter 
Committed: Thu Jan 4 15:37:10 2018 -0800

--
 .../util/TestZKSignerSecretProvider.java| 56 
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 2 files changed, 38 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e03cee73/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index d7b6e17..4f8b5ae 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -22,12 +22,21 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.timeout;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestZKSignerSecretProvider {
 
   private TestingServer zkServer;
 
+  // rollover every 2 sec
+  private final int timeout = 4000;
+  private final long rolloverFrequency = Long.valueOf(timeout / 2);
+
   @Before
   public void setup() throws Exception {
 zkServer = new TestingServer();
@@ -45,14 +54,14 @@ public class TestZKSignerSecretProvider {
   // Test just one ZKSignerSecretProvider to verify that it works in the
   // simplest case
   public void testOne() throws Exception {
-long rolloverFrequency = 15 * 1000; // rollover every 15 sec
 // use the same seed so we can predict the RNG
 long seed = System.currentTimeMillis();
 Random rand = new Random(seed);
 byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
 byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
 byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
-ZKSignerSecretProvider secretProvider = new ZKSignerSecretProvider(seed);
+ZKSignerSecretProvider secretProvider =
+spy(new ZKSignerSecretProvider(seed));
 Properties config = new Properties();
 config.setProperty(
 ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
@@ -68,7 +77,7 @@ public class TestZKSignerSecretProvider {
   Assert.assertEquals(2, allSecrets.length);
   Assert.assertArrayEquals(secret1, allSecrets[0]);
   Assert.assertNull(allSecrets[1]);
-  Thread.sleep((rolloverFrequency + 2000));
+  verify(secretProvider, timeout(timeout).times(1)).rollSecret();
 
   currentSecret = secretProvider.getCurrentSecret();
   allSecrets = secretProvider.getAllSecrets();
@@ -76,7 +85,7 @@ public class TestZKSignerSecretProvider {
   Assert.assertEquals(2, allSecrets.length);
   Assert.assertArrayEquals(secret2, allSecrets[0]);
   Assert.assertArrayEquals(secret1, allSecrets[1]);
-  Thread.sleep((rolloverFrequency + 2000));
+  verify(secretProvider, timeout(timeout).times(2)).rollSecret();
 
   currentSecret = secretProvider.getCurrentSecret();
   allSecrets = secretProvider.getAllSecrets();
@@ -84,7 +93,7 @@ public class TestZKSignerSecretProvider {
   Assert.assertEquals(2, allSecrets.length);
   Assert.assertArrayEquals(secret3, allSecrets[0]);
   Assert.assertArrayEquals(secret2, allSecrets[1]);
-  Thread.sleep((rolloverFrequency + 2000));
+  verify(secretProvider, timeout(timeout).times(3)).rollSecret();
 } finally {
   secretProvider.destroy();
 }
@@ -92,7 +101,6 @@ public class TestZKSignerSecretProvider {
 
   @Test
   public void testMultipleInit() throws Exception {
-long rolloverFrequency = 15 * 1000; // rollover every 15

hadoop git commit: YARN-7622. Allow fair-scheduler configuration on HDFS (gphillips via rkanter)

2018-01-03 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3ba985997 -> 7a5504480


YARN-7622. Allow fair-scheduler configuration on HDFS (gphillips via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a550448
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a550448
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a550448

Branch: refs/heads/trunk
Commit: 7a550448036c9d140d2c35c684cc8023ceb8880e
Parents: 3ba9859
Author: Robert Kanter 
Authored: Wed Jan 3 15:31:50 2018 -0800
Committer: Robert Kanter 
Committed: Wed Jan 3 15:31:50 2018 -0800

--
 .../fair/AllocationFileLoaderService.java   | 104 +++
 .../fair/TestAllocationFileLoaderService.java   |  92 
 2 files changed, 133 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a550448/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 597af94..f73e05f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -17,25 +17,15 @@
 */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -45,8 +35,8 @@ import org.apache.hadoop.yarn.security.AccessType;
 import org.apache.hadoop.yarn.security.Permission;
 import org.apache.hadoop.yarn.security.PrivilegedEntity;
 import org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -57,7 +47,17 @@ import org.w3c.dom.NodeList;
 import org.w3c.dom.Text;
 import org.xml.sax.SAXException;
 
-import com.google.common.annotations.VisibleForTesting;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.IOException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 @Public
 @Unstable
@@ -77,6 +77,9 @@ public class AllocationFileLoaderService extends 
AbstractService {
 
   public static final long THREAD_JOIN_TIMEOUT_MS = 1000;
 
+  //Permitted allocation file filesystems (case insensitive)
+  private static final String SUPPORTED_FS_REGEX =
+  "(?i)(hdfs)|(file)|(s3a)|(viewfs)";
   private static final String ROOT = "root";
   private static final AccessControlList EVERYBODY_ACL =
   new AccessControlList("*");
@@ -85,12 +88,14 @@ public class AllocationFileLoade

hadoop git commit: YARN-7577. Unit Fail: TestAMRestart#testPreemptedAMRestartOnRMRestart (miklos.szeg...@cloudera.com via rkanter)

2017-12-20 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1ba491ff9 -> 382215c72


YARN-7577. Unit Fail: TestAMRestart#testPreemptedAMRestartOnRMRestart 
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/382215c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/382215c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/382215c7

Branch: refs/heads/trunk
Commit: 382215c72b93d6a97d813f407cf6496a7c3f2a4a
Parents: 1ba491ff
Author: Robert Kanter 
Authored: Wed Dec 20 13:39:00 2017 -0800
Committer: Robert Kanter 
Committed: Wed Dec 20 13:39:00 2017 -0800

--
 .../applicationsmanager/TestAMRestart.java  | 131 +++
 1 file changed, 73 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/382215c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 3d523aa..4add186 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -45,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockMemoryRMStateStore;
+import 
org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase;
 import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
@@ -63,14 +65,20 @@ import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Test;
 
-public class TestAMRestart {
+/**
+ * Test AM restart functions.
+ */
+public class TestAMRestart extends ParameterizedSchedulerTestBase {
+
+  public TestAMRestart(SchedulerType type) throws IOException {
+super(type);
+  }
 
   @Test(timeout = 3)
   public void testAMRestartWithExistingContainers() throws Exception {
-YarnConfiguration conf = new YarnConfiguration();
-conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-MockRM rm1 = new MockRM(conf);
+MockRM rm1 = new MockRM(getConf());
 rm1.start();
 RMApp app1 =
 rm1.submitApp(200, "name", "user",
@@ -266,15 +274,14 @@ public class TestAMRestart {
 
   @Test(timeout = 3)
   public void testNMTokensRebindOnAMRestart() throws Exception {
-YarnConfiguration conf = new YarnConfiguration();
-conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
+getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
 // To prevent test from blacklisting nm1 for AM, we sit threshold to half
 // of 2 nodes which is 1
-conf.setFloat(
+getConf().setFloat(
 YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD,
 0.5f);
 
-MockRM rm1 = new MockRM(conf);
+MockRM rm1 = new MockRM(getConf());
 rm1.start();
 RMApp app1 =
 rm1.submitApp(200, "myname", "myuser",
@@ -378,11 +385,11 @@ public class TestAMRestart {
   // should not be counted towards AM max retry count.
   @Test(timeout = 10)
   public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception {
-YarnConfiguration conf = new YarnConfiguration();
-conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-MockRM rm1 = new MockRM(conf);
+getConf().set

hadoop git commit: MAPREDUCE-7018. Apply erasure coding properly to framework tarball and support plain tar (miklos.szeg...@cloudera.com via rkanter)

2017-12-11 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 00129c531 -> 2316f5269


MAPREDUCE-7018. Apply erasure coding properly to framework tarball and support 
plain tar (miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2316f526
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2316f526
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2316f526

Branch: refs/heads/trunk
Commit: 2316f526902e827b6c1b92a5bddef72d211bc742
Parents: 00129c5
Author: Robert Kanter 
Authored: Mon Dec 11 14:00:42 2017 -0800
Committer: Robert Kanter 
Committed: Mon Dec 11 14:00:42 2017 -0800

--
 .../mapred/uploader/FrameworkUploader.java  | 59 +--
 .../mapred/uploader/TestFrameworkUploader.java  | 79 +++-
 2 files changed, 79 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2316f526/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
index d1cd740..a374262 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/FrameworkUploader.java
@@ -81,7 +81,6 @@ public class FrameworkUploader implements Runnable {
 
   @VisibleForTesting
   OutputStream targetStream = null;
-  private Path targetPath = null;
   private String alias = null;
 
   private void printHelp(Options options) {
@@ -140,11 +139,12 @@ public class FrameworkUploader implements Runnable {
 }
   }
 
-  private void beginUpload() throws IOException, UploaderException {
+  @VisibleForTesting
+  void beginUpload() throws IOException, UploaderException {
 if (targetStream == null) {
   validateTargetPath();
   int lastIndex = target.indexOf('#');
-  targetPath =
+  Path targetPath =
   new Path(
   target.substring(
   0, lastIndex == -1 ? target.length() : lastIndex));
@@ -153,7 +153,37 @@ public class FrameworkUploader implements Runnable {
   targetPath.getName();
   LOG.info("Target " + targetPath);
   FileSystem fileSystem = targetPath.getFileSystem(new Configuration());
-  targetStream = fileSystem.create(targetPath, true);
+
+  targetStream = null;
+  if (fileSystem instanceof DistributedFileSystem) {
+LOG.info("Set replication to " +
+replication + " for path: " + targetPath);
+LOG.info("Disabling Erasure Coding for path: " + targetPath);
+DistributedFileSystem dfs = (DistributedFileSystem)fileSystem;
+DistributedFileSystem.HdfsDataOutputStreamBuilder builder =
+dfs.createFile(targetPath)
+.overwrite(true)
+.ecPolicyName(
+SystemErasureCodingPolicies.getReplicationPolicy().getName());
+if (replication > 0) {
+  builder.replication(replication);
+}
+targetStream = builder.build();
+  } else {
+LOG.warn("Cannot set replication to " +
+replication + " for path: " + targetPath +
+" on a non-distributed fileystem " +
+fileSystem.getClass().getName());
+  }
+  if (targetStream == null) {
+targetStream = fileSystem.create(targetPath, true);
+  }
+
+  if (targetPath.getName().endsWith("gz") ||
+  targetPath.getName().endsWith("tgz")) {
+LOG.info("Creating GZip");
+targetStream = new GZIPOutputStream(targetStream);
+  }
 }
   }
 
@@ -162,7 +192,7 @@ public class FrameworkUploader implements Runnable {
 beginUpload();
 LOG.info("Compressing tarball");
 try (TarArchiveOutputStream out = new TarArchiveOutputStream(
-new GZIPOutputStream(targetStream))) {
+targetStream)) {
   for (String fullPath : filteredInputFiles) {
 LOG.info("Adding " + fullPath);
 File file = new File(fullPath);
@@ -178,25 +208,6 @@ public class FrameworkUploader implements Runnable {
 targetStream.close();
   }
 }
-
-if (targetPat

hadoop git commit: YARN-6483. addendum to skip failing test (asuresh via rkanter)

2017-12-06 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 8bfd30b92 -> cdf305c33


YARN-6483. addendum to skip failing test (asuresh via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdf305c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdf305c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdf305c3

Branch: refs/heads/branch-3.0
Commit: cdf305c33794f0421d407362a7d2ade43c6f1674
Parents: 8bfd30b
Author: Robert Kanter 
Authored: Wed Dec 6 12:10:59 2017 -0800
Committer: Robert Kanter 
Committed: Wed Dec 6 12:10:59 2017 -0800

--
 .../yarn/server/resourcemanager/TestResourceTrackerService.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdf305c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 8931c16..5cd574e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
@@ -303,6 +304,7 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
 Assert.assertEquals(NodeAction.SHUTDOWN, nodeHeartbeat3.getNodeAction());
   }
 
+  @Ignore
   @Test
   public void testGracefulDecommissionDefaultTimeoutResolution()
   throws Exception {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5594. Handle old RMDelegationToken format when recovering RM (rkanter)

2017-12-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 274bbb193 -> 54b5da83e


YARN-5594. Handle old RMDelegationToken format when recovering RM (rkanter)

(cherry picked from commit d8863fc16fa3cbcdda5b99f79386c43e4fae5917)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54b5da83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54b5da83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54b5da83

Branch: refs/heads/branch-2
Commit: 54b5da83ec5337c9ee06adcc7bed514072cf2665
Parents: 274bbb1
Author: Robert Kanter 
Authored: Mon Dec 4 13:14:55 2017 -0800
Committer: Robert Kanter 
Committed: Mon Dec 4 13:18:04 2017 -0800

--
 .../client/YARNDelegationTokenIdentifier.java   |  11 ++
 .../yarn/security/TestYARNTokenIdentifier.java  | 115 +++
 .../recovery/FileSystemRMStateStore.java|   4 +-
 .../recovery/LeveldbRMStateStore.java   |   5 +-
 .../recovery/RMStateStoreUtils.java |  69 +++
 .../recovery/ZKRMStateStore.java|   3 +-
 .../RMDelegationTokenIdentifierData.java|   8 ++
 .../resourcemanager/TestClientRMTokens.java |  44 +++
 .../recovery/TestRMStateStoreUtils.java |  81 +
 9 files changed, 282 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54b5da83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
index 40ea858..da6a8c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
@@ -22,6 +22,7 @@ import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.io.Text;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -64,6 +65,11 @@ public abstract class YARNDelegationTokenIdentifier extends
 setMasterKeyId(builder.getMasterKeyId());
   }
 
+  public synchronized void readFieldsInOldFormat(DataInput in)
+  throws IOException {
+super.readFields(in);
+  }
+
   private void setBuilderFields() {
 if (builder.getOwner() != null &&
 !builder.getOwner().equals(getOwner().toString())) {
@@ -97,6 +103,11 @@ public abstract class YARNDelegationTokenIdentifier extends
 builder.build().writeTo((DataOutputStream) out);
   }
 
+  @VisibleForTesting
+  public synchronized void writeInOldFormat(DataOutput out) throws IOException 
{
+super.write(out);
+  }
+
   public YARNDelegationTokenIdentifierProto getProto() {
 setBuilderFields();
 return builder.build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54b5da83/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
index 130a65e..82e1943 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.HadoopKerberosName;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -214,10 +215,20 @@ public class TestYARNTokenIdentifier {
 Assert.assertEquals(ExecutionType.GUARANTEED,
 anotherToken.getExecutionType());
   }
-  
+
   @Test
   public void testRMDelegationTokenIdentifier() throws 

hadoop git commit: YARN-5594. Handle old RMDelegationToken format when recovering RM (rkanter)

2017-12-04 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk d30d57828 -> d8863fc16


YARN-5594. Handle old RMDelegationToken format when recovering RM (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8863fc1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8863fc1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8863fc1

Branch: refs/heads/trunk
Commit: d8863fc16fa3cbcdda5b99f79386c43e4fae5917
Parents: d30d578
Author: Robert Kanter 
Authored: Mon Dec 4 13:14:55 2017 -0800
Committer: Robert Kanter 
Committed: Mon Dec 4 13:14:55 2017 -0800

--
 .../client/YARNDelegationTokenIdentifier.java   |  11 ++
 .../yarn/security/TestYARNTokenIdentifier.java  | 115 +++
 .../recovery/FileSystemRMStateStore.java|   4 +-
 .../recovery/LeveldbRMStateStore.java   |   5 +-
 .../recovery/RMStateStoreUtils.java |  69 +++
 .../recovery/ZKRMStateStore.java|   3 +-
 .../RMDelegationTokenIdentifierData.java|   8 ++
 .../resourcemanager/TestClientRMTokens.java |  44 +++
 .../recovery/TestRMStateStoreUtils.java |  81 +
 9 files changed, 282 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8863fc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
index 40ea858..da6a8c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/YARNDelegationTokenIdentifier.java
@@ -22,6 +22,7 @@ import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.io.Text;
 import 
org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -64,6 +65,11 @@ public abstract class YARNDelegationTokenIdentifier extends
 setMasterKeyId(builder.getMasterKeyId());
   }
 
+  public synchronized void readFieldsInOldFormat(DataInput in)
+  throws IOException {
+super.readFields(in);
+  }
+
   private void setBuilderFields() {
 if (builder.getOwner() != null &&
 !builder.getOwner().equals(getOwner().toString())) {
@@ -97,6 +103,11 @@ public abstract class YARNDelegationTokenIdentifier extends
 builder.build().writeTo((DataOutputStream) out);
   }
 
+  @VisibleForTesting
+  public synchronized void writeInOldFormat(DataOutput out) throws IOException 
{
+super.write(out);
+  }
+
   public YARNDelegationTokenIdentifierProto getProto() {
 setBuilderFields();
 return builder.build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8863fc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
index 130a65e..82e1943 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.HadoopKerberosName;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -214,10 +215,20 @@ public class TestYARNTokenIdentifier {
 Assert.assertEquals(ExecutionType.GUARANTEED,
 anotherToken.getExecutionType());
   }
-  
+
   @Test
   public void testRMDelegationTokenIdentifier() throws IOException {
-
+testRMDelegationTokenIdentifier(false);
+  }
+
+  @Test
+  

hadoop git commit: YARN-4813. TestRMWebServicesDelegationTokenAuthentication.testDoAs fails intermittently (grepas via rkanter)

2017-12-01 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d0fc1cd0c -> edf9fd51f


YARN-4813. TestRMWebServicesDelegationTokenAuthentication.testDoAs fails 
intermittently (grepas via rkanter)

(cherry picked from commit c83fe4491731c994a4867759d80db31d9c1cab60)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edf9fd51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edf9fd51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edf9fd51

Branch: refs/heads/branch-2
Commit: edf9fd51fbe6821d402246c0d305ff535b1118f6
Parents: d0fc1cd
Author: Robert Kanter 
Authored: Fri Dec 1 12:18:13 2017 -0800
Committer: Robert Kanter 
Committed: Fri Dec 1 12:21:18 2017 -0800

--
 ...stRMWebServicesDelegationTokenAuthentication.java | 15 ++-
 1 file changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf9fd51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
index b406fdb..41e56ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
@@ -76,6 +76,8 @@ public class TestRMWebServicesDelegationTokenAuthentication {
 TestRMWebServicesDelegationTokenAuthentication.class.getName() + "-root");
   private static File httpSpnegoKeytabFile = new File(
 KerberosTestUtils.getKeytabFile());
+  private static final String SUN_SECURITY_KRB5_RCACHE_KEY =
+  "sun.security.krb5.rcache";
 
   private static String httpSpnegoPrincipal = KerberosTestUtils
 .getServerPrincipal();
@@ -83,7 +85,7 @@ public class TestRMWebServicesDelegationTokenAuthentication {
   private static boolean miniKDCStarted = false;
   private static MiniKdc testMiniKDC;
   private static MockRM rm;
-
+  private static String sunSecurityKrb5RcacheValue;
 
   String delegationTokenHeader;
 
@@ -98,6 +100,11 @@ public class TestRMWebServicesDelegationTokenAuthentication 
{
   @BeforeClass
   public static void setUp() {
 try {
+  // Disabling kerberos replay cache to avoid "Request is a replay" errors
+  // caused by frequent webservice calls
+  sunSecurityKrb5RcacheValue =
+  System.getProperty(SUN_SECURITY_KRB5_RCACHE_KEY);
+  System.setProperty(SUN_SECURITY_KRB5_RCACHE_KEY, "none");
   testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
   setupKDC();
   setupAndStartRM();
@@ -114,6 +121,12 @@ public class 
TestRMWebServicesDelegationTokenAuthentication {
 if (rm != null) {
   rm.stop();
 }
+if (sunSecurityKrb5RcacheValue == null) {
+  System.clearProperty(SUN_SECURITY_KRB5_RCACHE_KEY);
+} else {
+  System.setProperty(SUN_SECURITY_KRB5_RCACHE_KEY,
+  sunSecurityKrb5RcacheValue);
+}
   }
 
   @Parameterized.Parameters


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-4813. TestRMWebServicesDelegationTokenAuthentication.testDoAs fails intermittently (grepas via rkanter)

2017-12-01 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b78607a0 -> c83fe4491


YARN-4813. TestRMWebServicesDelegationTokenAuthentication.testDoAs fails 
intermittently (grepas via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c83fe449
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c83fe449
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c83fe449

Branch: refs/heads/trunk
Commit: c83fe4491731c994a4867759d80db31d9c1cab60
Parents: 3b78607
Author: Robert Kanter 
Authored: Fri Dec 1 12:18:13 2017 -0800
Committer: Robert Kanter 
Committed: Fri Dec 1 12:18:13 2017 -0800

--
 ...stRMWebServicesDelegationTokenAuthentication.java | 15 ++-
 1 file changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c83fe449/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
index b406fdb..41e56ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
@@ -76,6 +76,8 @@ public class TestRMWebServicesDelegationTokenAuthentication {
 TestRMWebServicesDelegationTokenAuthentication.class.getName() + "-root");
   private static File httpSpnegoKeytabFile = new File(
 KerberosTestUtils.getKeytabFile());
+  private static final String SUN_SECURITY_KRB5_RCACHE_KEY =
+  "sun.security.krb5.rcache";
 
   private static String httpSpnegoPrincipal = KerberosTestUtils
 .getServerPrincipal();
@@ -83,7 +85,7 @@ public class TestRMWebServicesDelegationTokenAuthentication {
   private static boolean miniKDCStarted = false;
   private static MiniKdc testMiniKDC;
   private static MockRM rm;
-
+  private static String sunSecurityKrb5RcacheValue;
 
   String delegationTokenHeader;
 
@@ -98,6 +100,11 @@ public class TestRMWebServicesDelegationTokenAuthentication 
{
   @BeforeClass
   public static void setUp() {
 try {
+  // Disabling kerberos replay cache to avoid "Request is a replay" errors
+  // caused by frequent webservice calls
+  sunSecurityKrb5RcacheValue =
+  System.getProperty(SUN_SECURITY_KRB5_RCACHE_KEY);
+  System.setProperty(SUN_SECURITY_KRB5_RCACHE_KEY, "none");
   testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
   setupKDC();
   setupAndStartRM();
@@ -114,6 +121,12 @@ public class 
TestRMWebServicesDelegationTokenAuthentication {
 if (rm != null) {
   rm.stop();
 }
+if (sunSecurityKrb5RcacheValue == null) {
+  System.clearProperty(SUN_SECURITY_KRB5_RCACHE_KEY);
+} else {
+  System.setProperty(SUN_SECURITY_KRB5_RCACHE_KEY,
+  sunSecurityKrb5RcacheValue);
+}
   }
 
   @Parameterized.Parameters


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-6994. Uploader tool for Distributed Cache Deploy code changes (miklos.szeg...@cloudera.com via rkanter)

2017-12-01 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 21d362735 -> 3b78607a0


MAPREDUCE-6994. Uploader tool for Distributed Cache Deploy code changes  
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b78607a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b78607a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b78607a

Branch: refs/heads/trunk
Commit: 3b78607a02f3a81ad730975ecdfa35967413271d
Parents: 21d3627
Author: Robert Kanter 
Authored: Fri Dec 1 12:11:43 2017 -0800
Committer: Robert Kanter 
Committed: Fri Dec 1 12:12:15 2017 -0800

--
 hadoop-mapreduce-project/bin/mapred |   4 +
 .../hadoop-mapreduce-client-uploader/pom.xml|  67 
 .../hadoop/mapred/uploader/DefaultJars.java |  46 +++
 .../mapred/uploader/FrameworkUploader.java  | 384 +++
 .../mapred/uploader/UploaderException.java  |  36 ++
 .../hadoop/mapred/uploader/package-info.java|  28 ++
 .../mapred/uploader/TestFrameworkUploader.java  | 315 +++
 .../hadoop-mapreduce-client/pom.xml |   1 +
 8 files changed, 881 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b78607a/hadoop-mapreduce-project/bin/mapred
--
diff --git a/hadoop-mapreduce-project/bin/mapred 
b/hadoop-mapreduce-project/bin/mapred
index f66f563..ce9ce21 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -32,6 +32,7 @@ function hadoop_usage
   hadoop_add_subcommand "pipes" client "run a Pipes job"
   hadoop_add_subcommand "queue" client "get information regarding JobQueues"
   hadoop_add_subcommand "sampler" client "sampler"
+  hadoop_add_subcommand "frameworkuploader" admin "mapreduce framework upload"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }
@@ -92,6 +93,9 @@ function mapredcmd_case
 sampler)
   HADOOP_CLASSNAME=org.apache.hadoop.mapred.lib.InputSampler
 ;;
+frameworkuploader)
+  HADOOP_CLASSNAME=org.apache.hadoop.mapred.uploader.FrameworkUploader
+;;
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b78607a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
new file mode 100644
index 000..a721404
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
@@ -0,0 +1,67 @@
+
+
+http://maven.apache.org/POM/4.0.0";
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+
+hadoop-mapreduce-client
+org.apache.hadoop
+3.1.0-SNAPSHOT
+
+4.0.0
+hadoop-mapreduce-client-uploader
+3.1.0-SNAPSHOT
+Apache Hadoop MapReduce Uploader
+
+
+
+commons-cli
+commons-cli
+
+
+org.apache.commons
+commons-compress
+
+
+org.apache.hadoop
+hadoop-common
+
+
+org.apache.hadoop
+hadoop-hdfs-client
+
+
+
+
+${project.parent.basedir}/../
+
+
+
+
+
+org.apache.maven.plugins
+maven-jar-plugin
+
+
+
+
org.apache.hadoop.mapred.uploader.FrameworkUploader
+
+
+
+
+
+
+
+
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b78607a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/DefaultJars.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/DefaultJars.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/src/main/java/org/apache/hadoop/mapred/uploader/DefaultJars.j

hadoop git commit: HADOOP-13493. Compatibility Docs should clarify the policy for what takes precedence when a conflict is found (templedf via rkanter)

2017-11-30 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ee4525e07 -> 2c2ff7da0


HADOOP-13493. Compatibility Docs should clarify the policy for what takes 
precedence when a conflict is found (templedf via rkanter)

(cherry picked from commit 75a3ab88f5f4ea6abf0a56cb8058e17b5a5fe403)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c2ff7da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c2ff7da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c2ff7da

Branch: refs/heads/branch-3.0
Commit: 2c2ff7da052c35f62dff3ede22bf6c1ddec6c5bb
Parents: ee4525e
Author: Robert Kanter 
Authored: Thu Nov 30 07:39:15 2017 -0800
Committer: Robert Kanter 
Committed: Thu Nov 30 07:39:51 2017 -0800

--
 .../src/site/markdown/Compatibility.md  | 29 +++-
 1 file changed, 22 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c2ff7da/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 461ff17..54be412 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -117,13 +117,7 @@ Compatibility types
 
 Developers SHOULD annotate all Hadoop interfaces and classes with the
 @InterfaceAudience and @InterfaceStability annotations to describe the
-intended audience and stability. Annotations may be at the package, class, or
-member variable or method level. Member variable and method annotations SHALL
-override class annotations, and class annotations SHALL override package
-annotations. A package, class, or member variable or method that is not
-annotated SHALL be interpreted as implicitly
-[Private](./InterfaceClassification.html#Private) and
-[Unstable](./InterfaceClassification.html#Unstable).
+intended audience and stability.
 
 * @InterfaceAudience captures the intended audience. Possible values are
 [Public](./InterfaceClassification.html#Public) (for end users and external
@@ -134,6 +128,27 @@ etc.), and 
[Private](./InterfaceClassification.html#Private)
 * @InterfaceStability describes what types of interface changes are permitted. 
Possible values are [Stable](./InterfaceClassification.html#Stable), 
[Evolving](./InterfaceClassification.html#Evolving), and 
[Unstable](./InterfaceClassification.html#Unstable).
 * @Deprecated notes that the package, class, or member variable or method 
could potentially be removed in the future and should not be used.
 
+Annotations MAY be applied at the package, class, or method level. If a method
+has no privacy or stability annotation, it SHALL inherit its intended audience
+or stability level from the class to which it belongs. If a class has no
+privacy or stability annotation, it SHALL inherit its intended audience or
+stability level from the package to which it belongs. If a package has no
+privacy or stability annotation, it SHALL be assumed to be
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable),
+respectively.
+
+In the event that an element's audience or stability annotation conflicts with
+the corresponding annotation of its parent (whether explicit or inherited), the
+element's audience or stability (respectively) SHALL be determined by the
+more restrictive annotation. For example, if a
+[Private](./InterfaceClassification.html#Private) method is contained
+in a [Public](./InterfaceClassification.html#Public) class, then the method
+SHALL be treated as [Private](./InterfaceClassification.html#Private). If a
+[Public](./InterfaceClassification.html#Public) method is contained in a
+[Private](./InterfaceClassification.html#Private) class, the method SHALL be
+treated as [Private](./InterfaceClassification.html#Private).
+
  Use Cases
 
 * 
[Public](./InterfaceClassification.html#Public)-[Stable](./InterfaceClassification.html#Stable)
 API compatibility is required to ensure end-user programs and downstream 
projects continue to work without modification.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13493. Compatibility Docs should clarify the policy for what takes precedence when a conflict is found (templedf via rkanter)

2017-11-30 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0e560f3b8 -> 75a3ab88f


HADOOP-13493. Compatibility Docs should clarify the policy for what takes 
precedence when a conflict is found (templedf via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75a3ab88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75a3ab88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75a3ab88

Branch: refs/heads/trunk
Commit: 75a3ab88f5f4ea6abf0a56cb8058e17b5a5fe403
Parents: 0e560f3
Author: Robert Kanter 
Authored: Thu Nov 30 07:39:15 2017 -0800
Committer: Robert Kanter 
Committed: Thu Nov 30 07:39:15 2017 -0800

--
 .../src/site/markdown/Compatibility.md  | 29 +++-
 1 file changed, 22 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75a3ab88/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 461ff17..54be412 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -117,13 +117,7 @@ Compatibility types
 
 Developers SHOULD annotate all Hadoop interfaces and classes with the
 @InterfaceAudience and @InterfaceStability annotations to describe the
-intended audience and stability. Annotations may be at the package, class, or
-member variable or method level. Member variable and method annotations SHALL
-override class annotations, and class annotations SHALL override package
-annotations. A package, class, or member variable or method that is not
-annotated SHALL be interpreted as implicitly
-[Private](./InterfaceClassification.html#Private) and
-[Unstable](./InterfaceClassification.html#Unstable).
+intended audience and stability.
 
 * @InterfaceAudience captures the intended audience. Possible values are
 [Public](./InterfaceClassification.html#Public) (for end users and external
@@ -134,6 +128,27 @@ etc.), and 
[Private](./InterfaceClassification.html#Private)
 * @InterfaceStability describes what types of interface changes are permitted. 
Possible values are [Stable](./InterfaceClassification.html#Stable), 
[Evolving](./InterfaceClassification.html#Evolving), and 
[Unstable](./InterfaceClassification.html#Unstable).
 * @Deprecated notes that the package, class, or member variable or method 
could potentially be removed in the future and should not be used.
 
+Annotations MAY be applied at the package, class, or method level. If a method
+has no privacy or stability annotation, it SHALL inherit its intended audience
+or stability level from the class to which it belongs. If a class has no
+privacy or stability annotation, it SHALL inherit its intended audience or
+stability level from the package to which it belongs. If a package has no
+privacy or stability annotation, it SHALL be assumed to be
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable),
+respectively.
+
+In the event that an element's audience or stability annotation conflicts with
+the corresponding annotation of its parent (whether explicit or inherited), the
+element's audience or stability (respectively) SHALL be determined by the
+more restrictive annotation. For example, if a
+[Private](./InterfaceClassification.html#Private) method is contained
+in a [Public](./InterfaceClassification.html#Public) class, then the method
+SHALL be treated as [Private](./InterfaceClassification.html#Private). If a
+[Public](./InterfaceClassification.html#Public) method is contained in a
+[Private](./InterfaceClassification.html#Private) class, the method SHALL be
+treated as [Private](./InterfaceClassification.html#Private).
+
  Use Cases
 
 * 
[Public](./InterfaceClassification.html#Public)-[Stable](./InterfaceClassification.html#Stable)
 API compatibility is required to ensure end-user programs and downstream 
projects continue to work without modification.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop if they're used without authenticating with kerberos in HA env (pbacsko via rkanter)

2017-11-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a081d9148 -> 8db49df4a


HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop 
if they're used without authenticating with kerberos in HA env (pbacsko via 
rkanter)

(cherry picked from commit f2efaf013f7577948061abbb49c6d17c375e92cc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8db49df4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8db49df4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8db49df4

Branch: refs/heads/branch-2
Commit: 8db49df4a2058bd9bba8afea4443176b4d01daa0
Parents: a081d91
Author: Robert Kanter 
Authored: Thu Nov 16 11:11:19 2017 -0800
Committer: Robert Kanter 
Committed: Thu Nov 16 11:12:01 2017 -0800

--
 .../apache/hadoop/io/retry/RetryPolicies.java   | 22 +++-
 .../apache/hadoop/io/retry/TestRetryProxy.java  | 22 
 .../io/retry/UnreliableImplementation.java  | 10 +
 .../hadoop/io/retry/UnreliableInterface.java|  6 +-
 4 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8db49df4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index fa0cb6e..adf23c0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -32,11 +32,14 @@ import java.util.Map.Entry;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
+import javax.security.sasl.SaslException;
+
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.ietf.jgss.GSSException;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -663,6 +666,11 @@ public class RetryPolicies {
 + retries + ") exceeded maximum allowed (" + maxRetries + ")");
   }
 
+  if (isSaslFailure(e)) {
+  return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
+  "SASL failure");
+  }
+
   if (e instanceof ConnectException ||
   e instanceof EOFException ||
   e instanceof NoRouteToHostException ||
@@ -716,7 +724,7 @@ public class RetryPolicies {
   private static long calculateExponentialTime(long time, int retries) {
 return calculateExponentialTime(time, retries, Long.MAX_VALUE);
   }
-  
+
   private static boolean isWrappedStandbyException(Exception e) {
 if (!(e instanceof RemoteException)) {
   return false;
@@ -725,6 +733,18 @@ public class RetryPolicies {
 StandbyException.class);
 return unwrapped instanceof StandbyException;
   }
+
+  private static boolean isSaslFailure(Exception e) {
+  Throwable current = e;
+  do {
+  if (current instanceof SaslException) {
+return true;
+  }
+  current = current.getCause();
+  } while (current != null);
+
+  return false;
+  }
   
   static RetriableException getWrappedRetriableException(Exception e) {
 if (!(e instanceof RemoteException)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8db49df4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 649af89..1accb0a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -39,6 +39,8 @@ import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
+import javax.security.sasl.SaslException;
+
 import static org.apache.hadoop.io.retry.RetryPolicies.*;
 import static org.junit.Assert.*;
 import static org.mockito.Matchers.any;
@@ -326,4 +328,24 @@ public class TestRetryProxy {
 assertEquals(InterruptedException.class, e.getCause()

hadoop git commit: HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop if they're used without authenticating with kerberos in HA env (pbacsko via rkanter)

2017-11-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6bf2c3019 -> f2efaf013


HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop 
if they're used without authenticating with kerberos in HA env (pbacsko via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2efaf01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2efaf01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2efaf01

Branch: refs/heads/trunk
Commit: f2efaf013f7577948061abbb49c6d17c375e92cc
Parents: 6bf2c30
Author: Robert Kanter 
Authored: Thu Nov 16 11:11:19 2017 -0800
Committer: Robert Kanter 
Committed: Thu Nov 16 11:11:19 2017 -0800

--
 .../apache/hadoop/io/retry/RetryPolicies.java   | 22 +++-
 .../apache/hadoop/io/retry/TestRetryProxy.java  | 22 
 .../io/retry/UnreliableImplementation.java  | 10 +
 .../hadoop/io/retry/UnreliableInterface.java|  6 +-
 4 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2efaf01/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index fa0cb6e..adf23c0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -32,11 +32,14 @@ import java.util.Map.Entry;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
+import javax.security.sasl.SaslException;
+
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.ietf.jgss.GSSException;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -663,6 +666,11 @@ public class RetryPolicies {
 + retries + ") exceeded maximum allowed (" + maxRetries + ")");
   }
 
+  if (isSaslFailure(e)) {
+  return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
+  "SASL failure");
+  }
+
   if (e instanceof ConnectException ||
   e instanceof EOFException ||
   e instanceof NoRouteToHostException ||
@@ -716,7 +724,7 @@ public class RetryPolicies {
   private static long calculateExponentialTime(long time, int retries) {
 return calculateExponentialTime(time, retries, Long.MAX_VALUE);
   }
-  
+
   private static boolean isWrappedStandbyException(Exception e) {
 if (!(e instanceof RemoteException)) {
   return false;
@@ -725,6 +733,18 @@ public class RetryPolicies {
 StandbyException.class);
 return unwrapped instanceof StandbyException;
   }
+
+  private static boolean isSaslFailure(Exception e) {
+  Throwable current = e;
+  do {
+  if (current instanceof SaslException) {
+return true;
+  }
+  current = current.getCause();
+  } while (current != null);
+
+  return false;
+  }
   
   static RetriableException getWrappedRetriableException(Exception e) {
 if (!(e instanceof RemoteException)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2efaf01/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 649af89..1accb0a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -39,6 +39,8 @@ import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
+import javax.security.sasl.SaslException;
+
 import static org.apache.hadoop.io.retry.RetryPolicies.*;
 import static org.junit.Assert.*;
 import static org.mockito.Matchers.any;
@@ -326,4 +328,24 @@ public class TestRetryProxy {
 assertEquals(InterruptedException.class, e.getCause().getClass());
 assertEquals("sleep interrupted", e.getCause

hadoop git commit: YARN-7386. Duplicate Strings in various places in Yarn memory (mi...@cloudera.com via rkanter)

2017-11-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 933a09e88 -> 82abc7224


YARN-7386. Duplicate Strings in various places in Yarn memory 
(mi...@cloudera.com via rkanter)

(cherry picked from commit a2c150a7369cc629bbfaa2dfa3a8495b6f9c42e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82abc722
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82abc722
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82abc722

Branch: refs/heads/branch-3.0
Commit: 82abc7224f8d2b926dc41e0918a6e75ca54f0fcf
Parents: 933a09e
Author: Robert Kanter 
Authored: Thu Nov 9 12:07:46 2017 -0800
Committer: Robert Kanter 
Committed: Thu Nov 9 12:13:09 2017 -0800

--
 .../impl/pb/ContainerLaunchContextPBImpl.java   | 16 
 .../yarn/api/records/impl/pb/ContainerPBImpl.java   |  2 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  7 ---
 .../rmapp/attempt/RMAppAttemptImpl.java |  3 ++-
 4 files changed, 19 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82abc722/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
index d722cc5..a9f2ee3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
@@ -27,6 +27,7 @@ import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerRetryContext;
@@ -392,7 +393,8 @@ extends ContainerLaunchContext {
 this.environment = new HashMap();
 
 for (StringStringMapProto c : list) {
-  this.environment.put(c.getKey(), c.getValue());
+  this.environment.put(StringInterner.weakIntern(c.getKey()),
+  StringInterner.weakIntern(c.getValue()));
 }
   }
   
@@ -402,7 +404,10 @@ extends ContainerLaunchContext {
   return;
 initEnv();
 this.environment.clear();
-this.environment.putAll(env);
+for (Map.Entry e : env.entrySet()) {
+  this.environment.put(StringInterner.weakIntern(e.getKey()),
+  StringInterner.weakIntern(e.getValue()));
+}
   }
   
   private void addEnvToProto() {
@@ -464,7 +469,7 @@ extends ContainerLaunchContext {
 
 for (ApplicationACLMapProto aclProto : list) {
   this.applicationACLS.put(ProtoUtils.convertFromProtoFormat(aclProto
-  .getAccessType()), aclProto.getAcl());
+  .getAccessType()), StringInterner.weakIntern(aclProto.getAcl()));
 }
   }
 
@@ -513,7 +518,10 @@ extends ContainerLaunchContext {
   return;
 initApplicationACLs();
 this.applicationACLS.clear();
-this.applicationACLS.putAll(appACLs);
+for (Map.Entry e : appACLs.entrySet()) {
+  this.applicationACLS.put(e.getKey(),
+  StringInterner.weakIntern(e.getValue()));
+}
   }
 
   public ContainerRetryContext getContainerRetryContext() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82abc722/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
index b6e22d1..be84938 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
@@ -181,7 +181,7 @@ public class ContainerPBImpl extends Container {
   builder.clearNodeHttpAddress();
   return;
 }
-builder.setNodeHttpAddress(nodeHttpAddress);
+builder.setNodeHttpAddress(nodeHttpAddress.intern());

hadoop git commit: YARN-7386. Duplicate Strings in various places in Yarn memory (mi...@cloudera.com via rkanter)

2017-11-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk ba8136615 -> a2c150a73


YARN-7386. Duplicate Strings in various places in Yarn memory 
(mi...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2c150a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2c150a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2c150a7

Branch: refs/heads/trunk
Commit: a2c150a7369cc629bbfaa2dfa3a8495b6f9c42e2
Parents: ba81366
Author: Robert Kanter 
Authored: Thu Nov 9 12:07:46 2017 -0800
Committer: Robert Kanter 
Committed: Thu Nov 9 12:12:52 2017 -0800

--
 .../impl/pb/ContainerLaunchContextPBImpl.java   | 16 
 .../yarn/api/records/impl/pb/ContainerPBImpl.java   |  2 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  7 ---
 .../rmapp/attempt/RMAppAttemptImpl.java |  3 ++-
 4 files changed, 19 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c150a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
index d722cc5..a9f2ee3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
@@ -27,6 +27,7 @@ import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerRetryContext;
@@ -392,7 +393,8 @@ extends ContainerLaunchContext {
 this.environment = new HashMap();
 
 for (StringStringMapProto c : list) {
-  this.environment.put(c.getKey(), c.getValue());
+  this.environment.put(StringInterner.weakIntern(c.getKey()),
+  StringInterner.weakIntern(c.getValue()));
 }
   }
   
@@ -402,7 +404,10 @@ extends ContainerLaunchContext {
   return;
 initEnv();
 this.environment.clear();
-this.environment.putAll(env);
+for (Map.Entry e : env.entrySet()) {
+  this.environment.put(StringInterner.weakIntern(e.getKey()),
+  StringInterner.weakIntern(e.getValue()));
+}
   }
   
   private void addEnvToProto() {
@@ -464,7 +469,7 @@ extends ContainerLaunchContext {
 
 for (ApplicationACLMapProto aclProto : list) {
   this.applicationACLS.put(ProtoUtils.convertFromProtoFormat(aclProto
-  .getAccessType()), aclProto.getAcl());
+  .getAccessType()), StringInterner.weakIntern(aclProto.getAcl()));
 }
   }
 
@@ -513,7 +518,10 @@ extends ContainerLaunchContext {
   return;
 initApplicationACLs();
 this.applicationACLS.clear();
-this.applicationACLS.putAll(appACLs);
+for (Map.Entry e : appACLs.entrySet()) {
+  this.applicationACLS.put(e.getKey(),
+  StringInterner.weakIntern(e.getValue()));
+}
   }
 
   public ContainerRetryContext getContainerRetryContext() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c150a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
index b6e22d1..be84938 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
@@ -181,7 +181,7 @@ public class ContainerPBImpl extends Container {
   builder.clearNodeHttpAddress();
   return;
 }
-builder.setNodeHttpAddress(nodeHttpAddress);
+builder.setNodeHttpAddress(nodeHttpAddress.intern());
   }
 
   @Override

http://git-wip-us.apache.org/repos/

hadoop git commit: YARN-7389. Make TestResourceManager Scheduler agnostic. (Robert Kanter via Haibo Chen)

2017-11-01 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 951d5a46f -> 955f3c30e


YARN-7389. Make TestResourceManager Scheduler agnostic. (Robert Kanter via 
Haibo Chen)

(cherry picked from commit d7f3737f3b3236fbf8c25fdd4656251ed63a2be9)
(cherry picked from commit 32a712a7406dd1e742131d1f0aa01279c8335ec1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/955f3c30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/955f3c30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/955f3c30

Branch: refs/heads/branch-2.9
Commit: 955f3c30e48a65ff61a9017e0fe68649b85c985d
Parents: 951d5a4
Author: Haibo Chen 
Authored: Tue Oct 24 22:17:56 2017 -0700
Committer: Robert Kanter 
Committed: Wed Nov 1 14:22:53 2017 -0700

--
 .../yarn/server/resourcemanager/TestResourceManager.java| 9 ++---
 1 file changed, 2 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/955f3c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index ad8c335..941e477 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -58,8 +58,6 @@ public class TestResourceManager {
   @Before
   public void setUp() throws Exception {
 Configuration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 UserGroupInformation.setConfiguration(conf);
 resourceManager = new ResourceManager();
 resourceManager.init(conf);
@@ -133,6 +131,7 @@ public class TestResourceManager {
 
// Send a heartbeat to kick the tires on the Scheduler
 nodeUpdate(nm1);
+((AbstractYarnScheduler)resourceManager.getResourceScheduler()).update();
 
 // Get allocations from the scheduler
 application.schedule();
@@ -262,8 +261,6 @@ public class TestResourceManager {
 }
   };
   Configuration conf = new YarnConfiguration();
-  conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   conf.set("hadoop.security.authentication", "kerberos");
   conf.set("hadoop.http.authentication.type", "kerberos");
@@ -298,8 +295,6 @@ public class TestResourceManager {
 for (String filterInitializer : simpleFilterInitializers) {
   resourceManager = new ResourceManager();
   Configuration conf = new YarnConfiguration();
-  conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   try {
 UserGroupInformation.setConfiguration(conf);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7389. Make TestResourceManager Scheduler agnostic. (Robert Kanter via Haibo Chen)

2017-11-01 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 956d81403 -> 32a712a74


YARN-7389. Make TestResourceManager Scheduler agnostic. (Robert Kanter via 
Haibo Chen)

(cherry picked from commit d7f3737f3b3236fbf8c25fdd4656251ed63a2be9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32a712a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32a712a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32a712a7

Branch: refs/heads/branch-2
Commit: 32a712a7406dd1e742131d1f0aa01279c8335ec1
Parents: 956d814
Author: Haibo Chen 
Authored: Tue Oct 24 22:17:56 2017 -0700
Committer: Robert Kanter 
Committed: Wed Nov 1 14:19:59 2017 -0700

--
 .../yarn/server/resourcemanager/TestResourceManager.java| 9 ++---
 1 file changed, 2 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32a712a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index ad8c335..941e477 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -58,8 +58,6 @@ public class TestResourceManager {
   @Before
   public void setUp() throws Exception {
 Configuration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 UserGroupInformation.setConfiguration(conf);
 resourceManager = new ResourceManager();
 resourceManager.init(conf);
@@ -133,6 +131,7 @@ public class TestResourceManager {
 
// Send a heartbeat to kick the tires on the Scheduler
 nodeUpdate(nm1);
+((AbstractYarnScheduler)resourceManager.getResourceScheduler()).update();
 
 // Get allocations from the scheduler
 application.schedule();
@@ -262,8 +261,6 @@ public class TestResourceManager {
 }
   };
   Configuration conf = new YarnConfiguration();
-  conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   conf.set("hadoop.security.authentication", "kerberos");
   conf.set("hadoop.http.authentication.type", "kerberos");
@@ -298,8 +295,6 @@ public class TestResourceManager {
 for (String filterInitializer : simpleFilterInitializers) {
   resourceManager = new ResourceManager();
   Configuration conf = new YarnConfiguration();
-  conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   try {
 UserGroupInformation.setConfiguration(conf);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7262. Add a hierarchy into the ZKRMStateStore for delegation token znodes to prevent jute buffer overflow (rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 952aa3f16 -> 7c26ae506


YARN-7262. Add a hierarchy into the ZKRMStateStore for delegation token znodes 
to prevent jute buffer overflow (rkanter)

(cherry picked from commit b1de78619f3e5e25d6f9d5eaf41925f22d212fb9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c26ae50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c26ae50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c26ae50

Branch: refs/heads/branch-2
Commit: 7c26ae506af97254adeaad9642cbfb519c6865be
Parents: 952aa3f
Author: Robert Kanter 
Authored: Thu Oct 26 17:47:32 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 17:51:05 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   7 +
 .../src/main/resources/yarn-default.xml |  18 +
 .../resourcemanager/recovery/RMStateStore.java  |   2 +
 .../recovery/ZKRMStateStore.java| 411 +--
 .../recovery/TestZKRMStateStore.java| 372 -
 5 files changed, 673 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c26ae50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 69dd080..96f6c57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -610,6 +610,13 @@ public class YarnConfiguration extends Configuration {
   RM_ZK_PREFIX + "appid-node.split-index";
   public static final int DEFAULT_ZK_APPID_NODE_SPLIT_INDEX = 0;
 
+  /** Index at which the RM Delegation Token ids will be split so that the
+   * delegation token znodes stored in the zookeeper RM state store will be
+   * stored as two different znodes (parent-child). **/
+  public static final  String ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX =
+  RM_ZK_PREFIX + "delegation-token-node.split-index";
+  public static final int DEFAULT_ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX = 0;
+
   public static final String RM_ZK_ACL = RM_ZK_PREFIX + "acl";
   public static final String DEFAULT_RM_ZK_ACL = "world:anyone:rwcda";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c26ae50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a5f5698..937b7b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -604,6 +604,24 @@
   
 
   
+Index at which the RM Delegation Token ids will be split so
+  that the delegation token znodes stored in the zookeeper RM state store
+  will be stored as two different znodes (parent-child). The split is done
+  from the end. For instance, with no split, a delegation token znode will
+  be of the form RMDelegationToken_123456789. If the value of this config 
is
+  1, the delegation token znode will be broken into two parts:
+  RMDelegationToken_12345678 and 9 respectively with former being the 
parent
+  node. This config can take values from 0 to 4. 0 means there will be no
+  split. If the value is outside this range, it will be treated as 0 (i.e.
+  no split). A value larger than 0 (up to 4) should be configured if you 
are
+  running a large number of applications, with long-lived delegation tokens
+  and state store operations (e.g. failover) are failing due to LenError in
+  Zookeeper.
+yarn.resourcemanager.zk-delegation-token-node.split-index
+0
+  
+
+  
 Specifies the maximum size of the data that can be stored
   in a znode. Value should be same or less than jute.maxbuffer configured
   in zookeeper. Default value configured is 1MB.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c26ae50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java

hadoop git commit: YARN-7262. Add a hierarchy into the ZKRMStateStore for delegation token znodes to prevent jute buffer overflow (rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 cd9078df6 -> aa65f6c1a


YARN-7262. Add a hierarchy into the ZKRMStateStore for delegation token znodes 
to prevent jute buffer overflow (rkanter)

(cherry picked from commit b1de78619f3e5e25d6f9d5eaf41925f22d212fb9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa65f6c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa65f6c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa65f6c1

Branch: refs/heads/branch-3.0
Commit: aa65f6c1ad0eadd0169d89e0ec83fb2b49693ae5
Parents: cd9078d
Author: Robert Kanter 
Authored: Thu Oct 26 17:47:32 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 17:47:51 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   7 +
 .../src/main/resources/yarn-default.xml |  18 +
 .../resourcemanager/recovery/RMStateStore.java  |   2 +
 .../recovery/ZKRMStateStore.java| 411 +--
 .../recovery/TestZKRMStateStore.java| 372 -
 5 files changed, 673 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa65f6c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 0f4080a..8718809 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -610,6 +610,13 @@ public class YarnConfiguration extends Configuration {
   RM_ZK_PREFIX + "appid-node.split-index";
   public static final int DEFAULT_ZK_APPID_NODE_SPLIT_INDEX = 0;
 
+  /** Index at which the RM Delegation Token ids will be split so that the
+   * delegation token znodes stored in the zookeeper RM state store will be
+   * stored as two different znodes (parent-child). **/
+  public static final  String ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX =
+  RM_ZK_PREFIX + "delegation-token-node.split-index";
+  public static final int DEFAULT_ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX = 0;
+
   public static final String RM_ZK_ACL = RM_ZK_PREFIX + "acl";
   public static final String DEFAULT_RM_ZK_ACL = "world:anyone:rwcda";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa65f6c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ccfe10a..06a7add 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -594,6 +594,24 @@
   
 
   
+Index at which the RM Delegation Token ids will be split so
+  that the delegation token znodes stored in the zookeeper RM state store
+  will be stored as two different znodes (parent-child). The split is done
+  from the end. For instance, with no split, a delegation token znode will
+  be of the form RMDelegationToken_123456789. If the value of this config 
is
+  1, the delegation token znode will be broken into two parts:
+  RMDelegationToken_12345678 and 9 respectively with former being the 
parent
+  node. This config can take values from 0 to 4. 0 means there will be no
+  split. If the value is outside this range, it will be treated as 0 (i.e.
+  no split). A value larger than 0 (up to 4) should be configured if you 
are
+  running a large number of applications, with long-lived delegation tokens
+  and state store operations (e.g. failover) are failing due to LenError in
+  Zookeeper.
+yarn.resourcemanager.zk-delegation-token-node.split-index
+0
+  
+
+  
 Specifies the maximum size of the data that can be stored
   in a znode. Value should be same or less than jute.maxbuffer configured
   in zookeeper. Default value configured is 1MB.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa65f6c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server

hadoop git commit: YARN-7262. Add a hierarchy into the ZKRMStateStore for delegation token znodes to prevent jute buffer overflow (rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 088ffee71 -> b1de78619


YARN-7262. Add a hierarchy into the ZKRMStateStore for delegation token znodes 
to prevent jute buffer overflow (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1de7861
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1de7861
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1de7861

Branch: refs/heads/trunk
Commit: b1de78619f3e5e25d6f9d5eaf41925f22d212fb9
Parents: 088ffee
Author: Robert Kanter 
Authored: Thu Oct 26 17:47:32 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 17:47:32 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   7 +
 .../src/main/resources/yarn-default.xml |  18 +
 .../resourcemanager/recovery/RMStateStore.java  |   2 +
 .../recovery/ZKRMStateStore.java| 411 +--
 .../recovery/TestZKRMStateStore.java| 372 -
 5 files changed, 673 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1de7861/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 0d5f2cb..9e5b8e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -639,6 +639,13 @@ public class YarnConfiguration extends Configuration {
   RM_ZK_PREFIX + "appid-node.split-index";
   public static final int DEFAULT_ZK_APPID_NODE_SPLIT_INDEX = 0;
 
+  /** Index at which the RM Delegation Token ids will be split so that the
+   * delegation token znodes stored in the zookeeper RM state store will be
+   * stored as two different znodes (parent-child). **/
+  public static final  String ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX =
+  RM_ZK_PREFIX + "delegation-token-node.split-index";
+  public static final int DEFAULT_ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX = 0;
+
   public static final String RM_ZK_ACL = RM_ZK_PREFIX + "acl";
   public static final String DEFAULT_RM_ZK_ACL = "world:anyone:rwcda";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1de7861/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4e78947..97ecdac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -594,6 +594,24 @@
   
 
   
+Index at which the RM Delegation Token ids will be split so
+  that the delegation token znodes stored in the zookeeper RM state store
+  will be stored as two different znodes (parent-child). The split is done
+  from the end. For instance, with no split, a delegation token znode will
+  be of the form RMDelegationToken_123456789. If the value of this config is
+  1, the delegation token znode will be broken into two parts:
+  RMDelegationToken_12345678 and 9 respectively with former being the parent
+  node. This config can take values from 0 to 4. 0 means there will be no
+  split. If the value is outside this range, it will be treated as 0 (i.e.
+  no split). A value larger than 0 (up to 4) should be configured if you are
+  running a large number of applications, with long-lived delegation tokens
+  and state store operations (e.g. failover) are failing due to LenError in
+  Zookeeper.
+yarn.resourcemanager.zk-delegation-token-node.split-index
+0
+  
+
+  
 Specifies the maximum size of the data that can be stored
   in a znode. Value should be same or less than jute.maxbuffer configured
   in zookeeper. Default value configured is 1MB.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1de7861/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
--
diff --git 
a/hadoop-yarn-project

hadoop git commit: YARN-7320. Duplicate LiteralByteStrings in SystemCredentialsForAppsProto.credentialsForApp_ Addendum (mi...@cloudera.com via rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 9665971a6 -> cd9078df6


YARN-7320. Duplicate LiteralByteStrings in 
SystemCredentialsForAppsProto.credentialsForApp_ Addendum (mi...@cloudera.com 
via rkanter)

(cherry picked from commit 088ffee7165d0e2e4fb9af7fb8f33626b0ed8ed3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd9078df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd9078df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd9078df

Branch: refs/heads/branch-3.0
Commit: cd9078df683cf5029f2b3d974ff6880de3ae6705
Parents: 9665971
Author: Robert Kanter 
Authored: Thu Oct 26 15:50:14 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 15:55:23 2017 -0700

--
 .../api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9078df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index ccc4191..9af5bfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -156,7 +156,7 @@ public class NodeHeartbeatResponsePBImpl extends 
NodeHeartbeatResponse {
   
builder.addSystemCredentialsForApps(SystemCredentialsForAppsProto.newBuilder()
 .setAppId(convertToProtoFormat(entry.getKey()))
 .setCredentialsForApp(BYTE_STRING_INTERNER.intern(
-ProtoUtils.convertToProtoFormat(entry.getValue();
+ProtoUtils.convertToProtoFormat(entry.getValue().duplicate();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7320. Duplicate LiteralByteStrings in SystemCredentialsForAppsProto.credentialsForApp_ Addendum (mi...@cloudera.com via rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 25932da6d -> 088ffee71


YARN-7320. Duplicate LiteralByteStrings in 
SystemCredentialsForAppsProto.credentialsForApp_ Addendum (mi...@cloudera.com 
via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/088ffee7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/088ffee7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/088ffee7

Branch: refs/heads/trunk
Commit: 088ffee7165d0e2e4fb9af7fb8f33626b0ed8ed3
Parents: 25932da
Author: Robert Kanter 
Authored: Thu Oct 26 15:50:14 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 15:50:14 2017 -0700

--
 .../api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/088ffee7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index ccc4191..9af5bfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -156,7 +156,7 @@ public class NodeHeartbeatResponsePBImpl extends 
NodeHeartbeatResponse {
   
builder.addSystemCredentialsForApps(SystemCredentialsForAppsProto.newBuilder()
 .setAppId(convertToProtoFormat(entry.getKey()))
 .setCredentialsForApp(BYTE_STRING_INTERNER.intern(
-ProtoUtils.convertToProtoFormat(entry.getValue();
+ProtoUtils.convertToProtoFormat(entry.getValue().duplicate();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7358. TestZKConfigurationStore and TestLeveldbConfigurationStore should explicitly set capacity scheduler (haibochen via rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 13f1a7e0e -> 46783158f


YARN-7358. TestZKConfigurationStore and TestLeveldbConfigurationStore should 
explicitly set capacity scheduler (haibochen via rkanter)

(cherry picked from commit 2da654e34a436aae266c1fbdec5c1067da8d854e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46783158
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46783158
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46783158

Branch: refs/heads/branch-2
Commit: 46783158f8b944a45428ebaedc32c7ce7742fe04
Parents: 13f1a7e
Author: Robert Kanter 
Authored: Thu Oct 26 10:25:10 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 10:49:57 2017 -0700

--
 .../scheduler/capacity/conf/ConfigurationStoreBaseTest.java  | 4 
 .../scheduler/capacity/conf/TestLeveldbConfigurationStore.java   | 1 -
 .../scheduler/capacity/conf/TestZKConfigurationStore.java| 3 ++-
 3 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46783158/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
index 8f3bc71..0f50b53 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
@@ -21,7 +21,9 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,6 +48,8 @@ public abstract class ConfigurationStoreBaseTest {
   @Before
   public void setUp() throws Exception {
 this.conf = new Configuration();
+this.conf.setClass(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class, CapacityScheduler.class);
 this.schedConf = new Configuration(false);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46783158/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
index 324cbee..ece32d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
@@ -28,7 +28,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46783158/hadoop-yarn-proj

hadoop git commit: YARN-7358. TestZKConfigurationStore and TestLeveldbConfigurationStore should explicitly set capacity scheduler (haibochen via rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 82c9b3bbb -> d92dddaf7


YARN-7358. TestZKConfigurationStore and TestLeveldbConfigurationStore should 
explicitly set capacity scheduler (haibochen via rkanter)

(cherry picked from commit 2da654e34a436aae266c1fbdec5c1067da8d854e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d92dddaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d92dddaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d92dddaf

Branch: refs/heads/branch-3.0
Commit: d92dddaf7c57744a0a7c5233fe4c95aa74bde0c0
Parents: 82c9b3b
Author: Robert Kanter 
Authored: Thu Oct 26 10:25:10 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 10:45:23 2017 -0700

--
 .../scheduler/capacity/conf/ConfigurationStoreBaseTest.java  | 4 
 .../scheduler/capacity/conf/TestLeveldbConfigurationStore.java   | 1 -
 .../scheduler/capacity/conf/TestZKConfigurationStore.java| 3 ++-
 3 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d92dddaf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
index 8f3bc71..0f50b53 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
@@ -21,7 +21,9 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,6 +48,8 @@ public abstract class ConfigurationStoreBaseTest {
   @Before
   public void setUp() throws Exception {
 this.conf = new Configuration();
+this.conf.setClass(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class, CapacityScheduler.class);
 this.schedConf = new Configuration(false);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d92dddaf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
index 324cbee..ece32d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
@@ -28,7 +28,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d92dddaf/hadoop-yarn-proj

hadoop git commit: YARN-7358. TestZKConfigurationStore and TestLeveldbConfigurationStore should explicitly set capacity scheduler (haibochen via rkanter)

2017-10-26 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 625039ef2 -> 2da654e34


YARN-7358. TestZKConfigurationStore and TestLeveldbConfigurationStore should 
explicitly set capacity scheduler (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2da654e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2da654e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2da654e3

Branch: refs/heads/trunk
Commit: 2da654e34a436aae266c1fbdec5c1067da8d854e
Parents: 625039e
Author: Robert Kanter 
Authored: Thu Oct 26 10:25:10 2017 -0700
Committer: Robert Kanter 
Committed: Thu Oct 26 10:25:10 2017 -0700

--
 .../scheduler/capacity/conf/ConfigurationStoreBaseTest.java  | 4 
 .../scheduler/capacity/conf/TestLeveldbConfigurationStore.java   | 1 -
 .../scheduler/capacity/conf/TestZKConfigurationStore.java| 3 ++-
 3 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2da654e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
index 8f3bc71..0f50b53 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/ConfigurationStoreBaseTest.java
@@ -21,7 +21,9 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -46,6 +48,8 @@ public abstract class ConfigurationStoreBaseTest {
   @Before
   public void setUp() throws Exception {
 this.conf = new Configuration();
+this.conf.setClass(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class, CapacityScheduler.class);
 this.schedConf = new Configuration(false);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2da654e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
index 324cbee..ece32d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/TestLeveldbConfigurationStore.java
@@ -28,7 +28,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 import org.junit.Before;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2da654e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/j

hadoop git commit: YARN-7385. TestFairScheduler#testUpdateDemand and TestFSLeafQueue#testUpdateDemand are failing with NPE (yufeigu via rkanter)

2017-10-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1d34a4805 -> 85eadec67


YARN-7385. TestFairScheduler#testUpdateDemand and 
TestFSLeafQueue#testUpdateDemand are failing with NPE (yufeigu via rkanter)

(cherry picked from commit 03af442e7608db2f8e6eb85a15aa0ba42781edab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85eadec6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85eadec6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85eadec6

Branch: refs/heads/branch-2
Commit: 85eadec67b3e30546f1557f9f5991910248a15b2
Parents: 1d34a48
Author: Robert Kanter 
Authored: Tue Oct 24 13:36:50 2017 -0700
Committer: Robert Kanter 
Committed: Tue Oct 24 13:43:27 2017 -0700

--
 .../server/resourcemanager/scheduler/fair/TestFSLeafQueue.java | 1 +
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java   | 2 ++
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85eadec6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 0bba35d..4a738ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -88,6 +88,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
 
 FSAppAttempt app = mock(FSAppAttempt.class);
 Mockito.when(app.getDemand()).thenReturn(maxResource);
+Mockito.when(app.getResourceUsage()).thenReturn(Resources.none());
 
 schedulable.addApp(app, true);
 schedulable.addApp(app, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85eadec6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index f1092bc..a89ba2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5251,8 +5251,10 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 
 FSAppAttempt app1 = mock(FSAppAttempt.class);
 Mockito.when(app1.getDemand()).thenReturn(maxResource);
+Mockito.when(app1.getResourceUsage()).thenReturn(Resources.none());
 FSAppAttempt app2 = mock(FSAppAttempt.class);
 Mockito.when(app2.getDemand()).thenReturn(maxResource);
+Mockito.when(app2.getResourceUsage()).thenReturn(Resources.none());
 
 QueueManager queueManager = scheduler.getQueueManager();
 FSParentQueue queue1 = queueManager.getParentQueue("queue1", true);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7385. TestFairScheduler#testUpdateDemand and TestFSLeafQueue#testUpdateDemand are failing with NPE (yufeigu via rkanter)

2017-10-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 8b200ab39 -> c5eeb2e4c


YARN-7385. TestFairScheduler#testUpdateDemand and 
TestFSLeafQueue#testUpdateDemand are failing with NPE (yufeigu via rkanter)

(cherry picked from commit 03af442e7608db2f8e6eb85a15aa0ba42781edab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5eeb2e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5eeb2e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5eeb2e4

Branch: refs/heads/branch-3.0
Commit: c5eeb2e4ced3b672d52438c0a1c53d2089698f5a
Parents: 8b200ab
Author: Robert Kanter 
Authored: Tue Oct 24 13:36:50 2017 -0700
Committer: Robert Kanter 
Committed: Tue Oct 24 13:40:06 2017 -0700

--
 .../server/resourcemanager/scheduler/fair/TestFSLeafQueue.java | 1 +
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java   | 2 ++
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5eeb2e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 74a905e..551aa51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -88,6 +88,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
 
 FSAppAttempt app = mock(FSAppAttempt.class);
 Mockito.when(app.getDemand()).thenReturn(maxResource);
+Mockito.when(app.getResourceUsage()).thenReturn(Resources.none());
 
 schedulable.addAppSchedulable(app);
 schedulable.addAppSchedulable(app);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5eeb2e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 3fb8d5a..98eee4e9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5249,8 +5249,10 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 
 FSAppAttempt app1 = mock(FSAppAttempt.class);
 Mockito.when(app1.getDemand()).thenReturn(maxResource);
+Mockito.when(app1.getResourceUsage()).thenReturn(Resources.none());
 FSAppAttempt app2 = mock(FSAppAttempt.class);
 Mockito.when(app2.getDemand()).thenReturn(maxResource);
+Mockito.when(app2.getResourceUsage()).thenReturn(Resources.none());
 
 QueueManager queueManager = scheduler.getQueueManager();
 FSParentQueue queue1 = queueManager.getParentQueue("queue1", true);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7385. TestFairScheduler#testUpdateDemand and TestFSLeafQueue#testUpdateDemand are failing with NPE (yufeigu via rkanter)

2017-10-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1c5c2b5dd -> 03af442e7


YARN-7385. TestFairScheduler#testUpdateDemand and 
TestFSLeafQueue#testUpdateDemand are failing with NPE (yufeigu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03af442e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03af442e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03af442e

Branch: refs/heads/trunk
Commit: 03af442e7608db2f8e6eb85a15aa0ba42781edab
Parents: 1c5c2b5
Author: Robert Kanter 
Authored: Tue Oct 24 13:36:50 2017 -0700
Committer: Robert Kanter 
Committed: Tue Oct 24 13:36:50 2017 -0700

--
 .../server/resourcemanager/scheduler/fair/TestFSLeafQueue.java | 1 +
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java   | 2 ++
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03af442e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 0bba35d..4a738ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -88,6 +88,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
 
 FSAppAttempt app = mock(FSAppAttempt.class);
 Mockito.when(app.getDemand()).thenReturn(maxResource);
+Mockito.when(app.getResourceUsage()).thenReturn(Resources.none());
 
 schedulable.addApp(app, true);
 schedulable.addApp(app, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03af442e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 944db1a..cd0570a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5252,8 +5252,10 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 
 FSAppAttempt app1 = mock(FSAppAttempt.class);
 Mockito.when(app1.getDemand()).thenReturn(maxResource);
+Mockito.when(app1.getResourceUsage()).thenReturn(Resources.none());
 FSAppAttempt app2 = mock(FSAppAttempt.class);
 Mockito.when(app2.getDemand()).thenReturn(maxResource);
+Mockito.when(app2.getResourceUsage()).thenReturn(Resources.none());
 
 QueueManager queueManager = scheduler.getQueueManager();
 FSParentQueue queue1 = queueManager.getParentQueue("queue1", true);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7382. NoSuchElementException in FairScheduler after failover causes RM crash (rkanter)

2017-10-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3d36f75f2 -> 1d34a4805


YARN-7382. NoSuchElementException in FairScheduler after failover causes RM 
crash (rkanter)

(cherry picked from commit 025c6565725c1819566377632753e8b9055617a6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d34a480
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d34a480
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d34a480

Branch: refs/heads/branch-2
Commit: 1d34a4805e0b5472bb039ae05cdb052e2976ca14
Parents: 3d36f75
Author: Robert Kanter 
Authored: Tue Oct 24 10:21:44 2017 -0700
Committer: Robert Kanter 
Committed: Tue Oct 24 10:29:36 2017 -0700

--
 .../scheduler/fair/FSAppAttempt.java| 10 ++
 .../TestWorkPreservingRMRestart.java| 21 +---
 2 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d34a480/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 006acea..21863b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -665,6 +665,16 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   if (!rmContainer.getState().equals(RMContainerState.COMPLETED)) {
 getQueue().incUsedResource(rmContainer.getContainer().getResource());
   }
+
+  // If not running unmanaged, the first container we recover is always
+  // the AM. Set the amResource for this app and update the leaf queue's AM
+  // usage
+  if (!isAmRunning() && !getUnmanagedAM()) {
+Resource resource = rmContainer.getAllocatedResource();
+setAMResource(resource);
+getQueue().addAMResourceUsage(resource);
+setAmRunning(true);
+  }
 } finally {
   writeLock.unlock();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d34a480/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index eb73db1..59f6092 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueu
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSParentQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerTestBase;
@@ -154,6 +155,7 @@ public class TestWorkPreservingRMRestart extends 
ParameterizedSchedulerTestBase
 new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
 nm1.registerNode();
 RMApp app1 = rm1.submitApp(200);
Resource amResources = app1.getAMResourceRequests().get(0).getCapability();

hadoop git commit: YARN-7382. NoSuchElementException in FairScheduler after failover causes RM crash (rkanter)

2017-10-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1e7ea6604 -> 4bae02a97


YARN-7382. NoSuchElementException in FairScheduler after failover causes RM 
crash (rkanter)

(cherry picked from commit 025c6565725c1819566377632753e8b9055617a6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bae02a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bae02a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bae02a9

Branch: refs/heads/branch-3.0
Commit: 4bae02a97d59b8f5b309156675ba5773cadef17c
Parents: 1e7ea66
Author: Robert Kanter 
Authored: Tue Oct 24 10:21:44 2017 -0700
Committer: Robert Kanter 
Committed: Tue Oct 24 10:25:43 2017 -0700

--
 .../scheduler/fair/FSAppAttempt.java| 10 ++
 .../TestWorkPreservingRMRestart.java| 21 +---
 2 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bae02a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 0c50394..72675b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -665,6 +665,16 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   if (!rmContainer.getState().equals(RMContainerState.COMPLETED)) {
 getQueue().incUsedResource(rmContainer.getContainer().getResource());
   }
+
+  // If not running unmanaged, the first container we recover is always
+  // the AM. Set the amResource for this app and update the leaf queue's AM
+  // usage
+  if (!isAmRunning() && !getUnmanagedAM()) {
+Resource resource = rmContainer.getAllocatedResource();
+setAMResource(resource);
+getQueue().addAMResourceUsage(resource);
+setAmRunning(true);
+  }
 } finally {
   writeLock.unlock();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bae02a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index c1cb4c1..064e217 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueu
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSParentQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerTestBase;
@@ -158,6 +159,7 @@ public class TestWorkPreservingRMRestart extends 
ParameterizedSchedulerTestBase
 new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
 nm1.registerNode();
 RMApp app1 = rm1.submitApp(200);
Resource amResources = app1.getAMResourceRequests().get(0).getCapability();

hadoop git commit: YARN-7382. NoSuchElementException in FairScheduler after failover causes RM crash (rkanter)

2017-10-24 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 679f99b14 -> 025c65657


YARN-7382. NoSuchElementException in FairScheduler after failover causes RM 
crash (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/025c6565
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/025c6565
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/025c6565

Branch: refs/heads/trunk
Commit: 025c6565725c1819566377632753e8b9055617a6
Parents: 679f99b
Author: Robert Kanter 
Authored: Tue Oct 24 10:21:44 2017 -0700
Committer: Robert Kanter 
Committed: Tue Oct 24 10:21:44 2017 -0700

--
 .../scheduler/fair/FSAppAttempt.java| 10 ++
 .../TestWorkPreservingRMRestart.java| 21 +---
 2 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/025c6565/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 8ab6e13..157d264 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -658,6 +658,16 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   if (!rmContainer.getState().equals(RMContainerState.COMPLETED)) {
 getQueue().incUsedResource(rmContainer.getContainer().getResource());
   }
+
+  // If not running unmanaged, the first container we recover is always
+  // the AM. Set the amResource for this app and update the leaf queue's AM
+  // usage
+  if (!isAmRunning() && !getUnmanagedAM()) {
+Resource resource = rmContainer.getAllocatedResource();
+setAMResource(resource);
+getQueue().addAMResourceUsage(resource);
+setAmRunning(true);
+  }
 } finally {
   writeLock.unlock();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/025c6565/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index c1cb4c1..064e217 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -66,6 +66,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueu
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSParentQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerTestBase;
@@ -158,6 +159,7 @@ public class TestWorkPreservingRMRestart extends 
ParameterizedSchedulerTestBase
 new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
 nm1.registerNode();
 RMApp app1 = rm1.submitApp(200);
+Resource amResources = app1.getAMResourceRequests().get(0).getCapability();
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

hadoop git commit: YARN-7320. Duplicate LiteralByteStrings in SystemCredentialsForAppsProto.credentialsForApp_ (mi...@cloudera.com via rkanter)

2017-10-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b8a2fd37d -> 61aba4f0c


YARN-7320. Duplicate LiteralByteStrings in 
SystemCredentialsForAppsProto.credentialsForApp_ (mi...@cloudera.com via 
rkanter)

(cherry picked from commit 5da295a34e39b507e8291073782e0576cd06896a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61aba4f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61aba4f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61aba4f0

Branch: refs/heads/branch-3.0
Commit: 61aba4f0c1b20cb783c5fc0aa2344a38fdf3d8b2
Parents: b8a2fd3
Author: Robert Kanter 
Authored: Mon Oct 23 17:56:56 2017 -0700
Committer: Robert Kanter 
Committed: Mon Oct 23 18:02:11 2017 -0700

--
 .../impl/pb/NodeHeartbeatResponsePBImpl.java | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61aba4f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index bbd1294..ccc4191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -26,6 +26,10 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.collect.Interner;
+import com.google.common.collect.Interners;
+import com.google.protobuf.ByteString;
+
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
 import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerRequestPBImpl;
@@ -84,6 +88,9 @@ public class NodeHeartbeatResponsePBImpl extends 
NodeHeartbeatResponse {
   private List containersToDecrease = null;
   private List containersToSignal = null;
 
+  private static final Interner<ByteString> BYTE_STRING_INTERNER =
+  Interners.newWeakInterner();
+
   public NodeHeartbeatResponsePBImpl() {
 builder = NodeHeartbeatResponseProto.newBuilder();
   }
@@ -148,8 +155,8 @@ public class NodeHeartbeatResponsePBImpl extends 
NodeHeartbeatResponse {
 for (Map.Entry entry : 
systemCredentials.entrySet()) {
   
builder.addSystemCredentialsForApps(SystemCredentialsForAppsProto.newBuilder()
 .setAppId(convertToProtoFormat(entry.getKey()))
-.setCredentialsForApp(ProtoUtils.convertToProtoFormat(
-entry.getValue().duplicate(;
+.setCredentialsForApp(BYTE_STRING_INTERNER.intern(
+ProtoUtils.convertToProtoFormat(entry.getValue();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7320. Duplicate LiteralByteStrings in SystemCredentialsForAppsProto.credentialsForApp_ (mi...@cloudera.com via rkanter)

2017-10-23 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk c904d60c3 -> 5da295a34


YARN-7320. Duplicate LiteralByteStrings in 
SystemCredentialsForAppsProto.credentialsForApp_ (mi...@cloudera.com via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5da295a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5da295a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5da295a3

Branch: refs/heads/trunk
Commit: 5da295a34e39b507e8291073782e0576cd06896a
Parents: c904d60
Author: Robert Kanter 
Authored: Mon Oct 23 17:56:56 2017 -0700
Committer: Robert Kanter 
Committed: Mon Oct 23 17:56:56 2017 -0700

--
 .../impl/pb/NodeHeartbeatResponsePBImpl.java | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5da295a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index bbd1294..ccc4191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -26,6 +26,10 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.collect.Interner;
+import com.google.common.collect.Interners;
+import com.google.protobuf.ByteString;
+
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
 import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerRequestPBImpl;
@@ -84,6 +88,9 @@ public class NodeHeartbeatResponsePBImpl extends 
NodeHeartbeatResponse {
   private List containersToDecrease = null;
   private List containersToSignal = null;
 
+  private static final Interner<ByteString> BYTE_STRING_INTERNER =
+  Interners.newWeakInterner();
+
   public NodeHeartbeatResponsePBImpl() {
 builder = NodeHeartbeatResponseProto.newBuilder();
   }
@@ -148,8 +155,8 @@ public class NodeHeartbeatResponsePBImpl extends 
NodeHeartbeatResponse {
 for (Map.Entry entry : 
systemCredentials.entrySet()) {
   
builder.addSystemCredentialsForApps(SystemCredentialsForAppsProto.newBuilder()
 .setAppId(convertToProtoFormat(entry.getKey()))
-.setCredentialsForApp(ProtoUtils.convertToProtoFormat(
-entry.getValue().duplicate(;
+.setCredentialsForApp(BYTE_STRING_INTERNER.intern(
+ProtoUtils.convertToProtoFormat(entry.getValue();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14938. Configuration.updatingResource map should be initialized lazily (mi...@cloudera.com via rkanter)

2017-10-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1452ba139 -> 8a1ed84c7


HADOOP-14938. Configuration.updatingResource map should be initialized lazily 
(mi...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a1ed84c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a1ed84c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a1ed84c

Branch: refs/heads/branch-2
Commit: 8a1ed84c77bbb665c2a980398cfbab5477ae35ba
Parents: 1452ba1
Author: Robert Kanter 
Authored: Mon Oct 16 16:45:35 2017 -0700
Committer: Robert Kanter 
Committed: Mon Oct 16 16:51:28 2017 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 93 
 1 file changed, 55 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1ed84c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index c2a09fc..17d0dcc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -269,9 +269,9 @@ public class Configuration implements 
Iterable>,
 
   /**
* Stores the mapping of key to the resource which modifies or loads 
-   * the key most recently
+   * the key most recently. Created lazily to avoid wasting memory.
*/
-  private Map updatingResource;
+  private volatile Map updatingResource;
 
   /**
* Specify exact input factory to avoid time finding correct one.
@@ -730,7 +730,6 @@ public class Configuration implements 
Iterable>,
*/
   public Configuration(boolean loadDefaults) {
 this.loadDefaults = loadDefaults;
-updatingResource = new ConcurrentHashMap();
 synchronized(Configuration.class) {
   REGISTRY.put(this, null);
 }
@@ -743,23 +742,25 @@ public class Configuration implements 
Iterable>,
*/
   @SuppressWarnings("unchecked")
   public Configuration(Configuration other) {
-   this.resources = (ArrayList) other.resources.clone();
-   synchronized(other) {
- if (other.properties != null) {
-   this.properties = (Properties)other.properties.clone();
- }
-
- if (other.overlay!=null) {
-   this.overlay = (Properties)other.overlay.clone();
- }
-
- this.updatingResource = new ConcurrentHashMap(
- other.updatingResource);
- this.finalParameters = Collections.newSetFromMap(
- new ConcurrentHashMap());
- this.finalParameters.addAll(other.finalParameters);
-   }
-   
+this.resources = (ArrayList) other.resources.clone();
+synchronized(other) {
+  if (other.properties != null) {
+this.properties = (Properties)other.properties.clone();
+  }
+
+  if (other.overlay!=null) {
+this.overlay = (Properties)other.overlay.clone();
+  }
+
+  if (other.updatingResource != null) {
+this.updatingResource = new ConcurrentHashMap(
+   other.updatingResource);
+  }
+  this.finalParameters = Collections.newSetFromMap(
+  new ConcurrentHashMap());
+  this.finalParameters.addAll(other.finalParameters);
+}
+
 synchronized(Configuration.class) {
   REGISTRY.put(this, null);
 }
@@ -1200,14 +1201,14 @@ public class Configuration implements 
Iterable>,
 String newSource = (source == null ? "programatically" : source);
 
 if (!isDeprecated(name)) {
-  updatingResource.put(name, new String[] {newSource});
+  putIntoUpdatingResource(name, new String[] {newSource});
   String[] altNames = getAlternativeNames(name);
   if(altNames != null) {
 for(String n: altNames) {
   if(!n.equals(name)) {
 getOverlay().setProperty(n, value);
 getProps().setProperty(n, value);
-updatingResource.put(n, new String[] {newSource});
+putIntoUpdatingResource(n, new String[] {newSource});
   }
 }
   }
@@ -1218,7 +1219,7 @@ public class Configuration implements 
Iterable>,
   for(String n : names) {
 getOverlay().setProperty(n, value);
 getProps().setProperty(n, value);
-updatingResource.put(n, new String[] {altSource});
+putIntoUpdatingResource(n, new String[] {altSource});
   }
 }
   }
@@ -2530,17 +2531,19 @@ public class Configuration implements 
Iterable>,
   protected synchronized Properties getProps() {
 if (properties == null) {
  properties = new Properties();

  1   2   3   4   >