hadoop git commit: YARN-6769. Make schedulables without demand less needy in FairSharePolicy#compare. (Yunfan Zhou via Yufei Gu)

2017-07-13 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fd6935709 -> b2a34fe8d


YARN-6769. Make schedulables without demand less needy in 
FairSharePolicy#compare. (Yunfan Zhou via Yufei Gu)

(cherry picked from commit 4a574e9a84f2e997038452b22f2ad2a2d42e8ac8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2a34fe8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2a34fe8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2a34fe8

Branch: refs/heads/branch-2
Commit: b2a34fe8d60d4a95356bf88c65290610af5606a8
Parents: fd69357
Author: Yufei Gu 
Authored: Thu Jul 13 23:10:10 2017 -0700
Committer: Yufei Gu 
Committed: Thu Jul 13 23:11:27 2017 -0700

--
 .../scheduler/fair/policies/FairSharePolicy.java | 17 +++--
 .../scheduler/fair/TestSchedulingPolicy.java | 19 ---
 2 files changed, 27 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2a34fe8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index c3ec47a..2a852aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -58,6 +58,9 @@ public class FairSharePolicy extends SchedulingPolicy {
   /**
* Compare Schedulables via weighted fair sharing. In addition, Schedulables
* below their min share get priority over those whose min share is met.
+   *
+   * Schedulables without resource demand get lower priority than
+   * ones who have demands.
* 
* Schedulables below their min share are compared by how far below it they
* are as a ratio. For example, if job A has 8 out of a min share of 10 tasks
@@ -79,6 +82,16 @@ public class FairSharePolicy extends SchedulingPolicy {
 
 @Override
 public int compare(Schedulable s1, Schedulable s2) {
+  Resource demand1 = s1.getDemand();
+  Resource demand2 = s2.getDemand();
+  if (demand1.equals(Resources.none()) && Resources.greaterThan(
+  RESOURCE_CALCULATOR, null, demand2, Resources.none())) {
+return 1;
+  } else if (demand2.equals(Resources.none()) && Resources.greaterThan(
+  RESOURCE_CALCULATOR, null, demand1, Resources.none())) {
+return -1;
+  }
+
   double minShareRatio1, minShareRatio2;
   double useToWeightRatio1, useToWeightRatio2;
   double weight1, weight2;
@@ -86,9 +99,9 @@ public class FairSharePolicy extends SchedulingPolicy {
   Resource resourceUsage1 = s1.getResourceUsage();
   Resource resourceUsage2 = s2.getResourceUsage();
   Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
-  s1.getMinShare(), s1.getDemand());
+  s1.getMinShare(), demand1);
   Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
-  s2.getMinShare(), s2.getDemand());
+  s2.getMinShare(), demand2);
   boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
   resourceUsage1, minShare1);
   boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2a34fe8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
index d84f0cf..3a16454 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resou

hadoop git commit: YARN-6769. Make schedulables without demand less needy in FairSharePolicy#compare. (Yunfan Zhou via Yufei Gu)

2017-07-13 Thread yufei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 228ddaa31 -> 4a574e9a8


YARN-6769. Make schedulables without demand less needy in 
FairSharePolicy#compare. (Yunfan Zhou via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a574e9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a574e9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a574e9a

Branch: refs/heads/trunk
Commit: 4a574e9a84f2e997038452b22f2ad2a2d42e8ac8
Parents: 228ddaa
Author: Yufei Gu 
Authored: Thu Jul 13 23:10:10 2017 -0700
Committer: Yufei Gu 
Committed: Thu Jul 13 23:10:10 2017 -0700

--
 .../scheduler/fair/policies/FairSharePolicy.java | 17 +++--
 .../scheduler/fair/TestSchedulingPolicy.java | 19 ---
 2 files changed, 27 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a574e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index c3ec47a..2a852aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -58,6 +58,9 @@ public class FairSharePolicy extends SchedulingPolicy {
   /**
* Compare Schedulables via weighted fair sharing. In addition, Schedulables
* below their min share get priority over those whose min share is met.
+   *
+   * Schedulables without resource demand get lower priority than
+   * ones who have demands.
* 
* Schedulables below their min share are compared by how far below it they
* are as a ratio. For example, if job A has 8 out of a min share of 10 tasks
@@ -79,6 +82,16 @@ public class FairSharePolicy extends SchedulingPolicy {
 
 @Override
 public int compare(Schedulable s1, Schedulable s2) {
+  Resource demand1 = s1.getDemand();
+  Resource demand2 = s2.getDemand();
+  if (demand1.equals(Resources.none()) && Resources.greaterThan(
+  RESOURCE_CALCULATOR, null, demand2, Resources.none())) {
+return 1;
+  } else if (demand2.equals(Resources.none()) && Resources.greaterThan(
+  RESOURCE_CALCULATOR, null, demand1, Resources.none())) {
+return -1;
+  }
+
   double minShareRatio1, minShareRatio2;
   double useToWeightRatio1, useToWeightRatio2;
   double weight1, weight2;
@@ -86,9 +99,9 @@ public class FairSharePolicy extends SchedulingPolicy {
   Resource resourceUsage1 = s1.getResourceUsage();
   Resource resourceUsage2 = s2.getResourceUsage();
   Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
-  s1.getMinShare(), s1.getDemand());
+  s1.getMinShare(), demand1);
   Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
-  s2.getMinShare(), s2.getDemand());
+  s2.getMinShare(), demand2);
   boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
   resourceUsage1, minShare1);
   boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a574e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
index d84f0cf..3a16454 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
+++ 
b/hadoop-yarn-proje

hadoop git commit: MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of history server with HTTPS. Contributed by Lantao Jin

2017-07-13 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8c69b040a -> d6228fbeb


MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of 
history server with HTTPS. Contributed by Lantao Jin

(cherry picked from commit 43f0503286eccbc6bb8ae77584b635bfd0c48e50)
(cherry picked from commit 756a06814355465c85b9d66f262ee875dd86dbb7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6228fbe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6228fbe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6228fbe

Branch: refs/heads/branch-2.8
Commit: d6228fbebd0f43fb3dae43d9736caf26cbd0d0ae
Parents: 8c69b04
Author: Ravi Prakash 
Authored: Thu Jul 13 16:16:45 2017 -0700
Committer: Ravi Prakash 
Committed: Thu Jul 13 22:57:44 2017 -0700

--
 .../hadoop/mapreduce/v2/util/MRWebAppUtil.java  |  9 ---
 .../webapp/TestMapReduceTrackingUriPlugin.java  | 26 ++--
 2 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6228fbe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
index d367060..951c9d5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
@@ -29,7 +29,6 @@ import 
org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -76,7 +75,9 @@ public class MRWebAppUtil {
 : "http://";;
   }
 
-  public static String getJHSWebappScheme() {
+  public static String getJHSWebappScheme(Configuration conf) {
+setHttpPolicyInJHS(conf.get(JHAdminConfig.MR_HS_HTTP_POLICY,
+JHAdminConfig.DEFAULT_MR_HS_HTTP_POLICY));
 return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://";
 : "http://";;
   }
@@ -101,7 +102,7 @@ public class MRWebAppUtil {
   }
   
   public static String getJHSWebappURLWithScheme(Configuration conf) {
-return getJHSWebappScheme() + getJHSWebappURLWithoutScheme(conf);
+return getJHSWebappScheme(conf) + getJHSWebappURLWithoutScheme(conf);
   }
   
   public static InetSocketAddress getJHSWebBindAddress(Configuration conf) {
@@ -153,7 +154,7 @@ public class MRWebAppUtil {
   
   public static String getApplicationWebURLOnJHSWithScheme(Configuration conf,
   ApplicationId appId) throws UnknownHostException {
-return getJHSWebappScheme()
+return getJHSWebappScheme(conf)
 + getApplicationWebURLOnJHSWithoutScheme(conf, appId);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6228fbe/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
index 8c3be58..9291097 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
 import java.net.URI;
 import java.net.URISyntaxException;
 
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -30,17 +31,38 @@ impor

hadoop git commit: YARN-5731. Preemption calculation is not accurate when reserved containers are present in queue. Contributed by Wangda Tan.

2017-07-13 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f5cdee6bf -> fd6935709


YARN-5731. Preemption calculation is not accurate when reserved containers are 
present in queue. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd693570
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd693570
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd693570

Branch: refs/heads/branch-2
Commit: fd6935709f2b0b86d0c4a0d3dc5b0c59eb7372ba
Parents: f5cdee6
Author: Sunil G 
Authored: Fri Jul 14 08:45:17 2017 +0530
Committer: Sunil G 
Committed: Fri Jul 14 08:45:17 2017 +0530

--
 .../capacity/FifoCandidatesSelector.java|  6 +-
 .../ProportionalCapacityPreemptionPolicy.java   | 22 -
 .../CapacitySchedulerPreemptionTestBase.java|  7 +-
 ...TestCapacitySchedulerSurgicalPreemption.java | 97 +++-
 4 files changed, 125 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd693570/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index f4d7e92..f843db4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -43,12 +43,12 @@ public class FifoCandidatesSelector
   LogFactory.getLog(FifoCandidatesSelector.class);
   private PreemptableResourceCalculator preemptableAmountCalculator;
 
-  FifoCandidatesSelector(
-  CapacitySchedulerPreemptionContext preemptionContext) {
+  FifoCandidatesSelector(CapacitySchedulerPreemptionContext preemptionContext,
+  boolean includeReservedResource) {
 super(preemptionContext);
 
 preemptableAmountCalculator = new PreemptableResourceCalculator(
-preemptionContext, false);
+preemptionContext, includeReservedResource);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd693570/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 1abf964..a3319c2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -233,7 +233,27 @@ public class ProportionalCapacityPreemptionPolicy
 }
 
 // initialize candidates preemption selection policies
-candidatesSelectionPolicies.add(new FifoCandidatesSelector(this));
+// When select candidates for reserved containers is enabled, exclude 
reserved
+// resource in fifo policy (less aggressive). Otherwise include reserved
+// resource.
+//
+// Why doing this? In YARN-4390, we added 
preemption-based-on-reserved-container
+// Support. To reduce unnecessary preemption for large containers. We will
+// not include reserved resources while calculating ideal-allocation in
+// FifoCandidatesSelector.
+//
+// Changes in YARN-4390 will significantly reduce number of containers 
preempted
+// When cluster has heterogeneous container requests. (Please check test
+// report: 
https://issues.apache.

hadoop git commit: YARN-6792. Incorrect XML conversion in NodeIDsInfo and LabelsToNodesInfo. Contributed by Giovanni Matteo Fumarola.

2017-07-13 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 756a06814 -> f5cdee6bf


YARN-6792. Incorrect XML conversion in NodeIDsInfo and LabelsToNodesInfo. 
Contributed by Giovanni Matteo Fumarola.

(cherry picked from commit 228ddaa31d812533b862576445494bc2cd8a2884)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5cdee6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5cdee6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5cdee6b

Branch: refs/heads/branch-2
Commit: f5cdee6bfd9be6f15a13026df42de4abbfffd585
Parents: 756a068
Author: Sunil G 
Authored: Fri Jul 14 08:07:05 2017 +0530
Committer: Sunil G 
Committed: Fri Jul 14 08:08:32 2017 +0530

--
 .../hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java | 5 -
 .../server/resourcemanager/webapp/dao/LabelsToNodesInfo.java   | 6 +-
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5cdee6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
index c23b02a..5f45b96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -26,7 +26,10 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
-@XmlRootElement(name = "labelsToNodesInfo")
+/**
+ * XML element uses to represent NodeIds' list.
+ */
+@XmlRootElement(name = "nodeIDsInfo")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class NodeIDsInfo {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5cdee6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
index 41dd410..e842d42 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
@@ -37,7 +37,11 @@ public class LabelsToNodesInfo {
   public LabelsToNodesInfo() {
   } // JAXB needs this
 
+  public LabelsToNodesInfo(Map labelsToNodes) {
+this.labelsToNodes = labelsToNodes;
+  }
+
   public Map getLabelsToNodes() {
-   return labelsToNodes;
+return labelsToNodes;
   }
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-6792. Incorrect XML conversion in NodeIDsInfo and LabelsToNodesInfo. Contributed by Giovanni Matteo Fumarola.

2017-07-13 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 43f050328 -> 228ddaa31


YARN-6792. Incorrect XML conversion in NodeIDsInfo and LabelsToNodesInfo. 
Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/228ddaa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/228ddaa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/228ddaa3

Branch: refs/heads/trunk
Commit: 228ddaa31d812533b862576445494bc2cd8a2884
Parents: 43f0503
Author: Sunil G 
Authored: Fri Jul 14 08:07:05 2017 +0530
Committer: Sunil G 
Committed: Fri Jul 14 08:07:05 2017 +0530

--
 .../hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java | 5 -
 .../server/resourcemanager/webapp/dao/LabelsToNodesInfo.java   | 6 +-
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/228ddaa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
index c23b02a..5f45b96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -26,7 +26,10 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
-@XmlRootElement(name = "labelsToNodesInfo")
+/**
+ * XML element uses to represent NodeIds' list.
+ */
+@XmlRootElement(name = "nodeIDsInfo")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class NodeIDsInfo {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228ddaa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
index 41dd410..e842d42 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
@@ -37,7 +37,11 @@ public class LabelsToNodesInfo {
   public LabelsToNodesInfo() {
   } // JAXB needs this
 
+  public LabelsToNodesInfo(Map labelsToNodes) {
+this.labelsToNodes = labelsToNodes;
+  }
+
   public Map getLabelsToNodes() {
-   return labelsToNodes;
+return labelsToNodes;
   }
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-5953:Create CLI for changing YARN configurations. (Jonathan Hung via xgong) [Forced Update!]

2017-07-13 Thread jhung
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 9aeeb554a -> 4cce82220 (forced update)


YARN-5953:Create CLI for changing YARN configurations. (Jonathan Hung via xgong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cce8222
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cce8222
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cce8222

Branch: refs/heads/YARN-5734
Commit: 4cce82220fe670c64be46d607008022e13b026ef
Parents: f22ce69
Author: Jonathan Hung 
Authored: Thu Jul 13 19:01:54 2017 -0700
Committer: Jonathan Hung 
Committed: Thu Jul 13 19:01:54 2017 -0700

--
 hadoop-yarn-project/hadoop-yarn/bin/yarn|   4 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|   5 +
 .../hadoop/yarn/client/cli/SchedConfCLI.java| 238 +++
 .../yarn/client/cli/TestSchedConfCLI.java   | 160 +
 .../hadoop/yarn/webapp/dao/QueueConfigInfo.java |  57 +
 .../yarn/webapp/dao/SchedConfUpdateInfo.java|  85 +++
 .../hadoop/yarn/webapp/dao/package-info.java|  27 +++
 .../yarn/webapp/util/YarnWebServiceUtils.java   |  14 ++
 .../ConfigurationMutationACLPolicy.java |   2 +-
 .../DefaultConfigurationMutationACLPolicy.java  |   2 +-
 .../scheduler/MutableConfScheduler.java |   2 +-
 .../scheduler/MutableConfigurationProvider.java |   2 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +-
 .../conf/MutableCSConfigurationProvider.java|   4 +-
 ...ueueAdminConfigurationMutationACLPolicy.java |   4 +-
 .../resourcemanager/webapp/RMWebServices.java   |   1 +
 .../webapp/dao/QueueConfigInfo.java |  57 -
 .../webapp/dao/SchedConfUpdateInfo.java |  69 --
 .../TestConfigurationMutationACLPolicies.java   |   4 +-
 .../TestMutableCSConfigurationProvider.java |   4 +-
 .../TestRMWebServicesConfigurationMutation.java |  65 +++--
 21 files changed, 631 insertions(+), 177 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cce8222/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index cf6457b..21656fe 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -46,6 +46,7 @@ function hadoop_usage
   hadoop_add_subcommand "queue" "prints queue information"
   hadoop_add_subcommand "resourcemanager" "run the ResourceManager"
   hadoop_add_subcommand "rmadmin" "admin tools"
+  hadoop_add_subcommand "schedconf" "modify scheduler configuration"
   hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
   hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager 
daemon"
   hadoop_add_subcommand "timelinereader" "run the timeline reader server"
@@ -137,6 +138,9 @@ function yarncmd_case
 rmadmin)
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
 ;;
+schedconf)
+  HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.SchedConfCLI'
+;;
 scmadmin)
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cce8222/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index ca879f5..8b72394 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -285,6 +285,11 @@ goto :eof
   set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
   goto :eof
 
+:schedconf
+  set CLASS=org.apache.hadoop.yarn.client.cli.SchedConfCLI
+  set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cce8222/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
new file mode 100644
index 000..e17062e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -0,0 +1,238 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information

hadoop git commit: YARN-6821. Move FederationStateStore SQL DDL files from test resource to sbin.

2017-07-13 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 ea9a2b0bd -> a20c4d0fd


YARN-6821. Move FederationStateStore SQL DDL files from test resource to sbin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a20c4d0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a20c4d0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a20c4d0f

Branch: refs/heads/YARN-2915
Commit: a20c4d0fd5d779d33d3cbda28b9469b6a9c48c26
Parents: ea9a2b0
Author: Subru Krishnan 
Authored: Thu Jul 13 18:53:21 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Jul 13 18:53:21 2017 -0700

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |   1 +
 .../FederationStateStoreStoreProcs.sql  | 511 +++
 .../SQLServer/FederationStateStoreTables.sql| 122 +
 .../FederationStateStoreStoreProcs.sql  | 511 ---
 .../SQLServer/FederationStateStoreTables.sql| 122 -
 5 files changed, 634 insertions(+), 633 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a20c4d0f/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 74ce9bc..289061f 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -61,6 +61,7 @@
 stop-yarn.sh
 start-yarn.cmd
 stop-yarn.cmd
+FederationStateStore**/**
   
   0755
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a20c4d0f/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
new file mode 100644
index 000..66d6f0e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
@@ -0,0 +1,511 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+USE [FederationStateStore]
+GO
+
+IF OBJECT_ID ( '[sp_addApplicationHomeSubCluster]', 'P' ) IS NOT NULL
+DROP PROCEDURE [sp_addApplicationHomeSubCluster];
+GO
+
+CREATE PROCEDURE [dbo].[sp_addApplicationHomeSubCluster]
+@applicationId VARCHAR(64),
+@homeSubCluster VARCHAR(256),
+@storedHomeSubCluster VARCHAR(256) OUTPUT,
+@rowCount int OUTPUT
+AS BEGIN
+DECLARE @errorMessage nvarchar(4000)
+
+BEGIN TRY
+BEGIN TRAN
+-- If application to sub-cluster map doesn't exist, insert it.
+-- Otherwise don't change the current mapping.
+IF NOT EXISTS (SELECT TOP 1 *
+   FROM [dbo].[applicationsHomeSubCluster]
+   WHERE [applicationId] = @applicationId)
+
+INSERT INTO [dbo].[applicationsHomeSubCluster] (
+[applicationId],
+[homeSubCluster])
+VALUES (
+@applicationId,
+@homeSubCluster);
+-- End of the IF block
+
+SELECT @rowCount = @@ROWCOUNT;
+
+SELECT @storedHomeSubCluster = [homeSubCluster]
+FROM [dbo].[applicationsHomeSubCluster]
+WHERE [applicationId] = @applicationId;
+
+COMMIT TRAN
+END TRY
+
+BEGIN CATCH
+ROLLBACK TRAN
+
+SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), 
ERROR_LINE())
+
+/*  raise error and terminate the execution */
+RAISERROR(@errorMessage, --- Error Message
+1, -- Severity
+-1 -- State
+) WITH log
+END CATCH
+END;
+GO
+
+IF OBJECT_ID ( '[sp_updateApplicationHomeSubCluster]', 'P' ) IS NOT NULL

hadoop git commit: YARN-6815. FederationStateStoreFacade return behavior should be consistent irrespective of whether caching is enabled or not.

2017-07-13 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 dc923d1f2 -> ea9a2b0bd


YARN-6815. FederationStateStoreFacade return behavior should be consistent 
irrespective of whether caching is enabled or not.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea9a2b0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea9a2b0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea9a2b0b

Branch: refs/heads/YARN-2915
Commit: ea9a2b0bd71bc00f0c8379f8f55148fca552c094
Parents: dc923d1
Author: Subru Krishnan 
Authored: Thu Jul 13 18:51:06 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Jul 13 18:51:06 2017 -0700

--
 .../federation/policies/RouterPolicyFacade.java | 53 +++-
 .../store/FederationMembershipStateStore.java   |  7 +--
 .../federation/store/FederationPolicyStore.java |  3 +-
 .../store/impl/MemoryFederationStateStore.java  |  9 ++--
 .../store/impl/SQLFederationStateStore.java | 11 ++--
 .../utils/FederationStateStoreFacade.java   | 20 +---
 .../impl/FederationStateStoreBaseTest.java  | 21 +++-
 .../utils/FederationStateStoreTestUtil.java |  1 +
 .../utils/TestFederationStateStoreFacade.java   | 16 +-
 9 files changed, 84 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea9a2b0b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
index 52c2905..bbf08e0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
@@ -23,13 +23,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.LocalityMulticastAMRMProxyPolicy;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
 import 
org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
@@ -38,6 +35,8 @@ import 
org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
 import 
org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -47,8 +46,8 @@ import com.google.common.annotations.VisibleForTesting;
  */
 public class RouterPolicyFacade {
 
-  private static final Log LOG =
-  LogFactory.getLog(LocalityMulticastAMRMProxyPolicy.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RouterPolicyFacade.class);
 
   private final SubClusterResolver subClusterResolver;
   private final FederationStateStoreFacade federationFacade;
@@ -68,10 +67,10 @@ public class RouterPolicyFacade {
 this.globalPolicyMap = new ConcurrentHashMap<>();
 
 // load default behavior from store if possible
-String defaulKey = YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY;
+String defaultKey = YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY;
 SubClusterPolicyConfiguration configuration = null;
 try {
-  configuration = federationFacade.getPolicyConfiguration(defaulKey);
+  configuration = federationFacade.getPolicyConfiguration(defaultKey);
 } catch (YarnException e) {
   LOG.warn("No fallback behavior defined in store, defaulting to XML "
   + "configuration fallback behavior.");
@@ -88,7 +87,7 @@ public class RouterPolicyFacade {
   ByteBuffer defaultPolicyParam = ByteBuf

hadoop git commit: YARN-6807. Adding required missing configs to Federation configuration guide based on e2e testing. (Tanuj Nayak via Subru).

2017-07-13 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 590d959ee -> dc923d1f2


YARN-6807. Adding required missing configs to Federation configuration guide 
based on e2e testing. (Tanuj Nayak via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc923d1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc923d1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc923d1f

Branch: refs/heads/YARN-2915
Commit: dc923d1f239e25f00a31cb07a309f9aef3903070
Parents: 590d959
Author: Subru Krishnan 
Authored: Thu Jul 13 18:44:32 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Jul 13 18:44:32 2017 -0700

--
 .../src/site/markdown/Federation.md | 53 ++--
 1 file changed, 49 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc923d1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
index c50ba76..79225b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
@@ -86,6 +86,8 @@ of the desirable properties of balance, optimal cluster 
utilization and global i
 
 *NOTE*: In the current implementation the GPG is a manual tuning process, 
simply exposed via a CLI (YARN-3657).
 
+This part of the federation system is part of future work in 
[YARN-5597](https://issues.apache.org/jira/browse/YARN-5597).
+
 
 ###Federation State-Store
 The Federation State defines the additional state that needs to be maintained 
to loosely couple multiple individual sub-clusters into a single large 
federated cluster. This includes the following information:
@@ -159,7 +161,7 @@ These are common configurations that should appear in the 
**conf/yarn-site.xml**
 |: |: |
 |`yarn.federation.enabled` | `true` | Whether federation is enabled or not |
 |`yarn.federation.state-store.class` | 
`org.apache.hadoop.yarn.server.federation.store.impl.SQLFederationStateStore` | 
The type of state-store to use. |
-|`yarn.federation.state-store.sql.url` | 
`jdbc:sqlserver://<host>:<port>;database` | For SQLFederationStateStore the 
name of the DB where the state is stored. |
+|`yarn.federation.state-store.sql.url` | 
`jdbc:sqlserver://<host>:<port>;databaseName=FederationStateStore` | For 
SQLFederationStateStore the name of the DB where the state is stored. |
 |`yarn.federation.state-store.sql.jdbc-class` | 
`com.microsoft.sqlserver.jdbc.SQLServerDataSource` | For 
SQLFederationStateStore the jdbc class to use. |
|`yarn.federation.state-store.sql.username` | `<username>` | For 
SQLFederationStateStore the username for the DB connection. |
|`yarn.federation.state-store.sql.password` | `<password>` | For 
SQLFederationStateStore the password for the DB connection. |
@@ -175,7 +177,7 @@ Optional:
 |`yarn.federation.policy-manager` | 
`org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager`
 | The choice of policy manager determines how Applications and 
ResourceRequests are routed through the system. |
|`yarn.federation.policy-manager-params` | `<binary>` | The payload that 
configures the policy. In our example a set of weights for router and amrmproxy 
policies. This is typically generated by serializing a policymanager that has 
been configured programmatically, or by populating the state-store with the 
.json serialized form of it. |
 |`yarn.federation.subcluster-resolver.class` | 
`org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl`
 | The class used to resolve which subcluster a node belongs to, and which 
subcluster(s) a rack belongs to. |
-| `yarn.federation.machine-list` | `node1,subcluster1,rack1\n node2 , 
subcluster2, RACK1\n noDE3,subcluster3, rack2\n node4, subcluster3, rack2\n` | 
a list of Nodes, Sub-clusters, Rack, used by the 
`DefaultSubClusterResolverImpl` |
+| `yarn.federation.machine-list` | `node1,subcluster1,rack1\n node2 , 
subcluster2, RACK1\n node3,subcluster3, rack2\n node4, subcluster3, rack2\n` | 
a list of Nodes, Sub-clusters, Rack, used by the 
`DefaultSubClusterResolverImpl` |
 
 ###ON RMs:
 
@@ -200,6 +202,7 @@ These are extra configurations that should appear in the 
**conf/yarn-site.xml**
 | Property | Example | Description |
 |: |: |
 |`yarn.router.bind-host` | `0.0.0.0` | Host IP to bind the router to.  The 
actual address the server will bind to. If this optional address is set, the 
RPC and webapp servers will bind to this address and the port specified in 
yarn.router.*.address respectively. Thi

hadoop git commit: MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of history server with HTTPS. Contributed by Lantao Jin

2017-07-13 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a77fb561e -> 756a06814


MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of 
history server with HTTPS. Contributed by Lantao Jin

(cherry picked from commit 43f0503286eccbc6bb8ae77584b635bfd0c48e50)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/756a0681
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/756a0681
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/756a0681

Branch: refs/heads/branch-2
Commit: 756a06814355465c85b9d66f262ee875dd86dbb7
Parents: a77fb56
Author: Ravi Prakash 
Authored: Thu Jul 13 16:16:45 2017 -0700
Committer: Ravi Prakash 
Committed: Thu Jul 13 16:19:52 2017 -0700

--
 .../hadoop/mapreduce/v2/util/MRWebAppUtil.java  |  9 ---
 .../webapp/TestMapReduceTrackingUriPlugin.java  | 26 ++--
 2 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/756a0681/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
index d367060..951c9d5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
@@ -29,7 +29,6 @@ import 
org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -76,7 +75,9 @@ public class MRWebAppUtil {
 : "http://";
   }
 
-  public static String getJHSWebappScheme() {
+  public static String getJHSWebappScheme(Configuration conf) {
+setHttpPolicyInJHS(conf.get(JHAdminConfig.MR_HS_HTTP_POLICY,
+JHAdminConfig.DEFAULT_MR_HS_HTTP_POLICY));
 return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://"
 : "http://";
   }
@@ -101,7 +102,7 @@ public class MRWebAppUtil {
   }
   
   public static String getJHSWebappURLWithScheme(Configuration conf) {
-return getJHSWebappScheme() + getJHSWebappURLWithoutScheme(conf);
+return getJHSWebappScheme(conf) + getJHSWebappURLWithoutScheme(conf);
   }
   
   public static InetSocketAddress getJHSWebBindAddress(Configuration conf) {
@@ -153,7 +154,7 @@ public class MRWebAppUtil {
   
   public static String getApplicationWebURLOnJHSWithScheme(Configuration conf,
   ApplicationId appId) throws UnknownHostException {
-return getJHSWebappScheme()
+return getJHSWebappScheme(conf)
 + getApplicationWebURLOnJHSWithoutScheme(conf, appId);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/756a0681/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
index 8c3be58..9291097 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
 import java.net.URI;
 import java.net.URISyntaxException;
 
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -30,17 +31,38 @@ import org.junit.Test;
 
 public class TestMapReduceTrackingUriPlugin {
   @Test

hadoop git commit: MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of history server with HTTPS. Contributed by Lantao Jin

2017-07-13 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk ebc048cc0 -> 43f050328


MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of 
history server with HTTPS. Contributed by Lantao Jin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43f05032
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43f05032
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43f05032

Branch: refs/heads/trunk
Commit: 43f0503286eccbc6bb8ae77584b635bfd0c48e50
Parents: ebc048c
Author: Ravi Prakash 
Authored: Thu Jul 13 16:16:45 2017 -0700
Committer: Ravi Prakash 
Committed: Thu Jul 13 16:16:45 2017 -0700

--
 .../hadoop/mapreduce/v2/util/MRWebAppUtil.java  |  9 ---
 .../webapp/TestMapReduceTrackingUriPlugin.java  | 26 ++--
 2 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f05032/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
index d367060..951c9d5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
@@ -29,7 +29,6 @@ import 
org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -76,7 +75,9 @@ public class MRWebAppUtil {
 : "http://";
   }
 
-  public static String getJHSWebappScheme() {
+  public static String getJHSWebappScheme(Configuration conf) {
+setHttpPolicyInJHS(conf.get(JHAdminConfig.MR_HS_HTTP_POLICY,
+JHAdminConfig.DEFAULT_MR_HS_HTTP_POLICY));
 return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://"
 : "http://";
   }
@@ -101,7 +102,7 @@ public class MRWebAppUtil {
   }
   
   public static String getJHSWebappURLWithScheme(Configuration conf) {
-return getJHSWebappScheme() + getJHSWebappURLWithoutScheme(conf);
+return getJHSWebappScheme(conf) + getJHSWebappURLWithoutScheme(conf);
   }
   
   public static InetSocketAddress getJHSWebBindAddress(Configuration conf) {
@@ -153,7 +154,7 @@ public class MRWebAppUtil {
   
   public static String getApplicationWebURLOnJHSWithScheme(Configuration conf,
   ApplicationId appId) throws UnknownHostException {
-return getJHSWebappScheme()
+return getJHSWebappScheme(conf)
 + getApplicationWebURLOnJHSWithoutScheme(conf, appId);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f05032/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
index 8c3be58..9291097 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
 import java.net.URI;
 import java.net.URISyntaxException;
 
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -30,17 +31,38 @@ import org.junit.Test;
 
 public class TestMapReduceTrackingUriPlugin {
   @Test
-  public void testProducesHistoryServerUriForAppId() throws URISyntaxException {

hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 9cff53fed -> 8c69b040a


YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe

(cherry picked from commit a77fb561efcea03060674d37ebadc15069766b6f)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c69b040
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c69b040
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c69b040

Branch: refs/heads/branch-2.8
Commit: 8c69b040ac74418e2f89126da2029d1e37b6d05d
Parents: 9cff53f
Author: Jason Lowe 
Authored: Thu Jul 13 18:00:05 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 18:00:05 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 17 ++--
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 85 
 4 files changed, 106 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c69b040/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 2dc4ad4..e1c6c96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -158,6 +158,10 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
   }
 
+  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+return PrivilegedOperationExecutor.getInstance(getConf());
+  }
+
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -168,7 +172,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -259,7 +263,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -407,8 +411,9 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-if (!e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + e.getOutput() + "\n");
+String output = e.getOutput();
+if (output != null && !e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + output + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -540,7 +545,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -576,7 +581,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(mountCGroupsOp,

hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.2 0bb0b5596 -> 5cf6a1af0


YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe

(cherry picked from commit 8c69b040ac74418e2f89126da2029d1e37b6d05d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cf6a1af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cf6a1af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cf6a1af

Branch: refs/heads/branch-2.8.2
Commit: 5cf6a1af0f1d1abdd7e9d29d5b427d554991c7dd
Parents: 0bb0b55
Author: Jason Lowe 
Authored: Thu Jul 13 18:00:05 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 18:01:20 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 17 ++--
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 85 
 4 files changed, 106 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cf6a1af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 2dc4ad4..e1c6c96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -158,6 +158,10 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
   }
 
+  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+return PrivilegedOperationExecutor.getInstance(getConf());
+  }
+
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -168,7 +172,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -259,7 +263,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -407,8 +411,9 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-if (!e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + e.getOutput() + "\n");
+String output = e.getOutput();
+if (output != null && !e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + output + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -540,7 +545,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -576,7 +581,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(mountCGroupsOp,
   false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cf6a1af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-se

hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6ce2b0e4d -> a77fb561e


YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe

(cherry picked from commit ebc048cc055d0f7d1b85bc0b6f56cd15673e837d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a77fb561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a77fb561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a77fb561

Branch: refs/heads/branch-2
Commit: a77fb561efcea03060674d37ebadc15069766b6f
Parents: 6ce2b0e
Author: Jason Lowe 
Authored: Thu Jul 13 17:44:47 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:49:55 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 19 +++--
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 89 
 4 files changed, 111 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a77fb561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index a2ac26e..b88866f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,6 +275,10 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
   }
 
+  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+return PrivilegedOperationExecutor.getInstance(getConf());
+  }
+
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -285,7 +289,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -382,7 +386,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -549,8 +553,9 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-if (!e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + e.getOutput() + "\n");
+String output = e.getOutput();
+if (output != null && !e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + output + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -704,7 +709,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -734,7 +739,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
 try {
   PrivilegedOperationExecutor privOpExecutor =
-  PrivilegedOperationExecutor.getInstance(super.getConf());
+  getPrivilegedOperationExecutor();
 
   String results =
   privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -793,7 +798,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor p

[2/2] hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe

2017-07-13 Thread jlowe
YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebc048cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebc048cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebc048cc

Branch: refs/heads/trunk
Commit: ebc048cc055d0f7d1b85bc0b6f56cd15673e837d
Parents: 0ffca5d
Author: Jason Lowe 
Authored: Thu Jul 13 17:44:47 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:44:47 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 19 +++--
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 89 
 4 files changed, 111 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebc048cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 9a3b2d2..2aaa835 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,6 +275,10 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
   }
 
+  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+return PrivilegedOperationExecutor.getInstance(getConf());
+  }
+
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -285,7 +289,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -382,7 +386,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -530,8 +534,9 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-if (!e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + e.getOutput() + "\n");
+String output = e.getOutput();
+if (output != null && !e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + output + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -729,7 +734,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -759,7 +764,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
 try {
   PrivilegedOperationExecutor privOpExecutor =
-  PrivilegedOperationExecutor.getInstance(super.getConf());
+  getPrivilegedOperationExecutor();
 
   String results =
   privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -818,7 +823,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperat

[1/2] hadoop git commit: Revert "YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe"

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk f76f5c091 -> ebc048cc0


Revert "YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe"

This reverts commit f76f5c0919cdb0b032edb309d137093952e77268.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ffca5d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ffca5d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ffca5d3

Branch: refs/heads/trunk
Commit: 0ffca5d347df0acb1979dff7a07ae88ea834adc7
Parents: f76f5c0
Author: Jason Lowe 
Authored: Thu Jul 13 17:42:38 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:42:38 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 19 ++---
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 89 
 4 files changed, 17 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ffca5d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 47b99c2..9a3b2d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,10 +275,6 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
   }
 
-  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
-return PrivilegedOperationExecutor.getInstance(getConf());
-  }
-
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -289,7 +285,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(conf);
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -386,7 +382,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(conf);
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -534,9 +530,8 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-String output = e.getOutput();
-if (output!= null && !e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + output + "\n");
+if (!e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + e.getOutput() + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -734,7 +729,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(conf);
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -764,7 +759,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
 try {
   PrivilegedOperationExecutor privOpExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(super.getConf());
 
   String results =
   privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -823,7 +818,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privil

hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5f1ee72b0 -> f76f5c091


YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f76f5c09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f76f5c09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f76f5c09

Branch: refs/heads/trunk
Commit: f76f5c0919cdb0b032edb309d137093952e77268
Parents: 5f1ee72
Author: Jason Lowe 
Authored: Thu Jul 13 17:38:17 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:38:17 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 19 +++--
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 89 
 4 files changed, 111 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76f5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 9a3b2d2..47b99c2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,6 +275,10 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
   }
 
+  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+return PrivilegedOperationExecutor.getInstance(getConf());
+  }
+
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -285,7 +289,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -382,7 +386,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -530,8 +534,9 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-if (!e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + e.getOutput() + "\n");
+String output = e.getOutput();
+if (output!= null && !e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + output + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -729,7 +734,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -759,7 +764,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
 try {
   PrivilegedOperationExecutor privOpExecutor =
-  PrivilegedOperationExecutor.getInstance(super.getConf());
+  getPrivilegedOperationExecutor();
 
   String results =
   privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -818,7 +823,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstan

hadoop git commit: YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst upgrade. Contributed by Jonathan Eagles

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.2 eaef44bc4 -> 0bb0b5596


YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst 
upgrade. Contributed by Jonathan Eagles

(cherry picked from commit 5f1ee72b0ebf0330417b7c0115083bc851923be4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bb0b559
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bb0b559
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bb0b559

Branch: refs/heads/branch-2.8.2
Commit: 0bb0b559656f8a20308af54dd3f0f85043c64ffd
Parents: eaef44b
Author: Jason Lowe 
Authored: Thu Jul 13 17:27:40 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:35:35 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 50 
 1 file changed, 41 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bb0b559/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index d139346..00f6630 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -28,6 +28,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -74,6 +75,7 @@ import org.iq80.leveldb.Options;
 import org.iq80.leveldb.ReadOptions;
 import org.iq80.leveldb.WriteBatch;
 import org.nustaq.serialization.FSTConfiguration;
+import org.nustaq.serialization.FSTClazzNameRegistry;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
@@ -170,9 +172,22 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   .getLog(RollingLevelDBTimelineStore.class);
   private static FSTConfiguration fstConf =
   FSTConfiguration.createDefaultConfiguration();
+  // Fall back to 2.24 parsing if 2.50 parsing fails
+  private static FSTConfiguration fstConf224 =
+  FSTConfiguration.createDefaultConfiguration();
+  // Static class code for 2.24
+  private static final int LINKED_HASH_MAP_224_CODE = 83;
 
   static {
 fstConf.setShareReferences(false);
+fstConf224.setShareReferences(false);
+// YARN-6654 unable to find class for code 83 (LinkedHashMap)
+// The linked hash map was changed between 2.24 and 2.50 so that
+// the static code for LinkedHashMap (83) was changed to a dynamic
+// code.
+FSTClazzNameRegistry registry = fstConf224.getClassRegistry();
+registry.registerClass(
+LinkedHashMap.class, LINKED_HASH_MAP_224_CODE, fstConf224);
   }
 
   @Private
@@ -339,7 +354,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   deletionThread.start();
 }
 super.serviceStart();
-   }
+  }
 
   @Override
   protected void serviceStop() throws Exception {
@@ -365,7 +380,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
 private final long ttl;
 private final long ttlInterval;
 
-public EntityDeletionThread(Configuration conf) {
+EntityDeletionThread(Configuration conf) {
   ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
   DEFAULT_TIMELINE_SERVICE_TTL_MS);
   ttlInterval = conf.getLong(
@@ -479,9 +494,15 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   try {
 o = fstConf.asObject(iterator.peekNext().getValue());
 entity.addOtherInfo(keyStr, o);
-  } catch (Exception e) {
-LOG.warn("Error while decoding "
-+ entityId + ":otherInfo:" + keyStr, e);
+  } catch (Exception ignore) {
+try {
+  // Fall back to 2.24 parser
+  o = fstConf224.asObject(iterator.peekNext().getValue());
+  entity.addOtherInfo(keyStr, o);
+} catch (Exception e) {
+  LOG.warn("Error while decoding "
+  

hadoop git commit: YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst upgrade. Contributed by Jonathan Eagles

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c180005fc -> 9cff53fed


YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst 
upgrade. Contributed by Jonathan Eagles

(cherry picked from commit 5f1ee72b0ebf0330417b7c0115083bc851923be4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cff53fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cff53fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cff53fe

Branch: refs/heads/branch-2.8
Commit: 9cff53fed90f203f07dc4b3f29a06bd1c0428415
Parents: c180005
Author: Jason Lowe 
Authored: Thu Jul 13 17:27:40 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:30:57 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 50 
 1 file changed, 41 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cff53fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index d139346..00f6630 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -28,6 +28,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -74,6 +75,7 @@ import org.iq80.leveldb.Options;
 import org.iq80.leveldb.ReadOptions;
 import org.iq80.leveldb.WriteBatch;
 import org.nustaq.serialization.FSTConfiguration;
+import org.nustaq.serialization.FSTClazzNameRegistry;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
@@ -170,9 +172,22 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   .getLog(RollingLevelDBTimelineStore.class);
   private static FSTConfiguration fstConf =
   FSTConfiguration.createDefaultConfiguration();
+  // Fall back to 2.24 parsing if 2.50 parsing fails
+  private static FSTConfiguration fstConf224 =
+  FSTConfiguration.createDefaultConfiguration();
+  // Static class code for 2.24
+  private static final int LINKED_HASH_MAP_224_CODE = 83;
 
   static {
 fstConf.setShareReferences(false);
+fstConf224.setShareReferences(false);
+// YARN-6654 unable to find class for code 83 (LinkedHashMap)
+// The linked hash map was changed between 2.24 and 2.50 so that
+// the static code for LinkedHashMap (83) was changed to a dynamic
+// code.
+FSTClazzNameRegistry registry = fstConf224.getClassRegistry();
+registry.registerClass(
+LinkedHashMap.class, LINKED_HASH_MAP_224_CODE, fstConf224);
   }
 
   @Private
@@ -339,7 +354,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   deletionThread.start();
 }
 super.serviceStart();
-   }
+  }
 
   @Override
   protected void serviceStop() throws Exception {
@@ -365,7 +380,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
 private final long ttl;
 private final long ttlInterval;
 
-public EntityDeletionThread(Configuration conf) {
+EntityDeletionThread(Configuration conf) {
   ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
   DEFAULT_TIMELINE_SERVICE_TTL_MS);
   ttlInterval = conf.getLong(
@@ -479,9 +494,15 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   try {
 o = fstConf.asObject(iterator.peekNext().getValue());
 entity.addOtherInfo(keyStr, o);
-  } catch (Exception e) {
-LOG.warn("Error while decoding "
-+ entityId + ":otherInfo:" + keyStr, e);
+  } catch (Exception ignore) {
+try {
+  // Fall back to 2.24 parser
+  o = fstConf224.asObject(iterator.peekNext().getValue());
+  entity.addOtherInfo(keyStr, o);
+} catch (Exception e) {
+  LOG.warn("Error while decoding "
+  + en

hadoop git commit: YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst upgrade. Contributed by Jonathan Eagles

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d83e8712f -> 6ce2b0e4d


YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst 
upgrade. Contributed by Jonathan Eagles

(cherry picked from commit 5f1ee72b0ebf0330417b7c0115083bc851923be4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ce2b0e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ce2b0e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ce2b0e4

Branch: refs/heads/branch-2
Commit: 6ce2b0e4d4093cb03e2b3afa466a0bfc88a180a5
Parents: d83e871
Author: Jason Lowe 
Authored: Thu Jul 13 17:27:40 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:28:49 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 50 
 1 file changed, 41 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ce2b0e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index d139346..00f6630 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -28,6 +28,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -74,6 +75,7 @@ import org.iq80.leveldb.Options;
 import org.iq80.leveldb.ReadOptions;
 import org.iq80.leveldb.WriteBatch;
 import org.nustaq.serialization.FSTConfiguration;
+import org.nustaq.serialization.FSTClazzNameRegistry;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
@@ -170,9 +172,22 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   .getLog(RollingLevelDBTimelineStore.class);
   private static FSTConfiguration fstConf =
   FSTConfiguration.createDefaultConfiguration();
+  // Fall back to 2.24 parsing if 2.50 parsing fails
+  private static FSTConfiguration fstConf224 =
+  FSTConfiguration.createDefaultConfiguration();
+  // Static class code for 2.24
+  private static final int LINKED_HASH_MAP_224_CODE = 83;
 
   static {
 fstConf.setShareReferences(false);
+fstConf224.setShareReferences(false);
+// YARN-6654 unable to find class for code 83 (LinkedHashMap)
+// The linked hash map was changed between 2.24 and 2.50 so that
+// the static code for LinkedHashMap (83) was changed to a dynamic
+// code.
+FSTClazzNameRegistry registry = fstConf224.getClassRegistry();
+registry.registerClass(
+LinkedHashMap.class, LINKED_HASH_MAP_224_CODE, fstConf224);
   }
 
   @Private
@@ -339,7 +354,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   deletionThread.start();
 }
 super.serviceStart();
-   }
+  }
 
   @Override
   protected void serviceStop() throws Exception {
@@ -365,7 +380,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
 private final long ttl;
 private final long ttlInterval;
 
-public EntityDeletionThread(Configuration conf) {
+EntityDeletionThread(Configuration conf) {
   ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
   DEFAULT_TIMELINE_SERVICE_TTL_MS);
   ttlInterval = conf.getLong(
@@ -479,9 +494,15 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   try {
 o = fstConf.asObject(iterator.peekNext().getValue());
 entity.addOtherInfo(keyStr, o);
-  } catch (Exception e) {
-LOG.warn("Error while decoding "
-+ entityId + ":otherInfo:" + keyStr, e);
+  } catch (Exception ignore) {
+try {
+  // Fall back to 2.24 parser
+  o = fstConf224.asObject(iterator.peekNext().getValue());
+  entity.addOtherInfo(keyStr, o);
+} catch (Exception e) {
+  LOG.warn("Error while decoding "
+  + entityId + ":otherInfo:" + keyStr, e);
+}

hadoop git commit: YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst upgrade. Contributed by Jonathan Eagles

2017-07-13 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 945c0958b -> 5f1ee72b0


YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst 
upgrade. Contributed by Jonathan Eagles


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f1ee72b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f1ee72b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f1ee72b

Branch: refs/heads/trunk
Commit: 5f1ee72b0ebf0330417b7c0115083bc851923be4
Parents: 945c095
Author: Jason Lowe 
Authored: Thu Jul 13 17:27:40 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:27:40 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 50 
 1 file changed, 41 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f1ee72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index d139346..00f6630 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -28,6 +28,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -74,6 +75,7 @@ import org.iq80.leveldb.Options;
 import org.iq80.leveldb.ReadOptions;
 import org.iq80.leveldb.WriteBatch;
 import org.nustaq.serialization.FSTConfiguration;
+import org.nustaq.serialization.FSTClazzNameRegistry;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
@@ -170,9 +172,22 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   .getLog(RollingLevelDBTimelineStore.class);
   private static FSTConfiguration fstConf =
   FSTConfiguration.createDefaultConfiguration();
+  // Fall back to 2.24 parsing if 2.50 parsing fails
+  private static FSTConfiguration fstConf224 =
+  FSTConfiguration.createDefaultConfiguration();
+  // Static class code for 2.24
+  private static final int LINKED_HASH_MAP_224_CODE = 83;
 
   static {
 fstConf.setShareReferences(false);
+fstConf224.setShareReferences(false);
+// YARN-6654 unable to find class for code 83 (LinkedHashMap)
+// The linked hash map was changed between 2.24 and 2.50 so that
+// the static code for LinkedHashMap (83) was changed to a dynamic
+// code.
+FSTClazzNameRegistry registry = fstConf224.getClassRegistry();
+registry.registerClass(
+LinkedHashMap.class, LINKED_HASH_MAP_224_CODE, fstConf224);
   }
 
   @Private
@@ -339,7 +354,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   deletionThread.start();
 }
 super.serviceStart();
-   }
+  }
 
   @Override
   protected void serviceStop() throws Exception {
@@ -365,7 +380,7 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
 private final long ttl;
 private final long ttlInterval;
 
-public EntityDeletionThread(Configuration conf) {
+EntityDeletionThread(Configuration conf) {
   ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
   DEFAULT_TIMELINE_SERVICE_TTL_MS);
   ttlInterval = conf.getLong(
@@ -479,9 +494,15 @@ public class RollingLevelDBTimelineStore extends 
AbstractService implements
   try {
 o = fstConf.asObject(iterator.peekNext().getValue());
 entity.addOtherInfo(keyStr, o);
-  } catch (Exception e) {
-LOG.warn("Error while decoding "
-+ entityId + ":otherInfo:" + keyStr, e);
+  } catch (Exception ignore) {
+try {
+  // Fall back to 2.24 parser
+  o = fstConf224.asObject(iterator.peekNext().getValue());
+  entity.addOtherInfo(keyStr, o);
+} catch (Exception e) {
+  LOG.warn("Error while decoding "
+  + entityId + ":otherInfo:" + keyStr, e);
+}
   }
 }

hadoop git commit: MAPREDUCE-6697. Concurrent task limits should only be applied when necessary. Contributed by Nathan Roberts.

2017-07-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 187bb77dd -> 7d24d4751


MAPREDUCE-6697. Concurrent task limits should only be applied when necessary. 
Contributed by Nathan Roberts.

(cherry picked from commit a5c0476a990ec1e7eb34ce2462a45aa52cc1350d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d24d475
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d24d475
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d24d475

Branch: refs/heads/branch-2.7
Commit: 7d24d4751e6becdb35377c3e836b1e95e663688f
Parents: 187bb77
Author: Akira Ajisaka 
Authored: Wed Jun 28 10:50:09 2017 +0900
Committer: Konstantin V Shvachko 
Committed: Thu Jul 13 14:20:17 2017 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../v2/app/rm/RMContainerAllocator.java |  6 +-
 .../v2/app/rm/TestRMContainerAllocator.java | 73 ++--
 3 files changed, 76 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d24d475/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 68c5310..7230590 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -41,6 +41,9 @@ Release 2.7.4 - UNRELEASED
 MAPREDUCE-6433. launchTime may be negative.
 (zxu) backported by Chris Douglas.
 
+MAPREDUCE-6697. Concurrent task limits should only be applied when
+necessary. (Nathan Roberts via shv).
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d24d475/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 0df58b7..5426dc1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -818,7 +818,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 
   private void applyConcurrentTaskLimits() {
 int numScheduledMaps = scheduledRequests.maps.size();
-if (maxRunningMaps > 0 && numScheduledMaps > 0) {
+if (maxRunningMaps > 0 && numScheduledMaps > 0 &&
+getJob().getTotalMaps() > maxRunningMaps) {
   int maxRequestedMaps = Math.max(0,
   maxRunningMaps - assignedRequests.maps.size());
   int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size();
@@ -833,7 +834,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 }
 
 int numScheduledReduces = scheduledRequests.reduces.size();
-if (maxRunningReduces > 0 && numScheduledReduces > 0) {
+if (maxRunningReduces > 0 && numScheduledReduces > 0 &&
+getJob().getTotalReduces() > maxRunningReduces) {
   int maxRequestedReduces = Math.max(0,
   maxRunningReduces - assignedRequests.reduces.size());
   int reduceRequestLimit = Math.min(maxRequestedReduces,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d24d475/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index da1fbfb..4275fcc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -2618,14 +2618,77 @@ public class TestRMContainerAllocator {
   }
 
   @Test
+  public void testConcurrentTaskLimitsDisabledIfSmaller() throws Exception {
+fin

hadoop git commit: MAPREDUCE-6697. Concurrent task limits should only be applied when necessary. Contributed by Nathan Roberts.

2017-07-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7c8d4a1b3 -> c180005fc


MAPREDUCE-6697. Concurrent task limits should only be applied when necessary. 
Contributed by Nathan Roberts.

(cherry picked from commit a5c0476a990ec1e7eb34ce2462a45aa52cc1350d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c180005f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c180005f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c180005f

Branch: refs/heads/branch-2.8
Commit: c180005fcf3d47771f5924c96ee7753b07fa6d44
Parents: 7c8d4a1
Author: Akira Ajisaka 
Authored: Wed Jun 28 10:50:09 2017 +0900
Committer: Konstantin V Shvachko 
Committed: Thu Jul 13 13:30:33 2017 -0700

--
 .../v2/app/rm/RMContainerAllocator.java |  6 +-
 .../v2/app/rm/TestRMContainerAllocator.java | 73 ++--
 2 files changed, 73 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c180005f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 55fc7bc..2af670c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -842,7 +842,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 
   private void applyConcurrentTaskLimits() {
 int numScheduledMaps = scheduledRequests.maps.size();
-if (maxRunningMaps > 0 && numScheduledMaps > 0) {
+if (maxRunningMaps > 0 && numScheduledMaps > 0 &&
+getJob().getTotalMaps() > maxRunningMaps) {
   int maxRequestedMaps = Math.max(0,
   maxRunningMaps - assignedRequests.maps.size());
   int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size();
@@ -857,7 +858,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 }
 
 int numScheduledReduces = scheduledRequests.reduces.size();
-if (maxRunningReduces > 0 && numScheduledReduces > 0) {
+if (maxRunningReduces > 0 && numScheduledReduces > 0 &&
+getJob().getTotalReduces() > maxRunningReduces) {
   int maxRequestedReduces = Math.max(0,
   maxRunningReduces - assignedRequests.reduces.size());
   int reduceRequestLimit = Math.min(maxRequestedReduces,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c180005f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 4e11a5c..023dcd8 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -2733,14 +2733,77 @@ public class TestRMContainerAllocator {
   }
 
   @Test
+  public void testConcurrentTaskLimitsDisabledIfSmaller() throws Exception {
+final int MAP_COUNT = 1;
+final int REDUCE_COUNT = 1;
+final int MAP_LIMIT = 1;
+final int REDUCE_LIMIT = 1;
+Configuration conf = new Configuration();
+conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MAP_LIMIT);
+conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, REDUCE_LIMIT);
+conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
+ApplicationId appId = ApplicationId.newInstance(1, 1);
+ApplicationAttemptId appAttemptId =
+ApplicationAttemptId.newInstance(appId, 1);
+JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+Job mockJob = mock(Job.class);
+when(mockJob.getReport()).thenReturn(
+MRBuilderUtils.newJobReport(jobId, "job",

hadoop git commit: YARN-5892. Support user-specific minimum user limit percentage in Capacity Scheduler. Contributed by Eric Payne.

2017-07-13 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 df69b13b4 -> d83e8712f


YARN-5892. Support user-specific minimum user limit percentage in Capacity 
Scheduler. Contributed by Eric Payne.

(cherry picked from commit ca13b224b2feb9c44de861da9cbba8dd2a12cb35)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d83e8712
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d83e8712
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d83e8712

Branch: refs/heads/branch-2
Commit: d83e8712f69511e1238095ec4bd64a9bd400a286
Parents: df69b13
Author: Sunil G 
Authored: Thu Jun 22 23:50:57 2017 -0700
Committer: Eric Payne 
Committed: Thu Jul 13 14:50:28 2017 -0500

--
 .../resource/DefaultResourceCalculator.java |   6 +
 .../resource/DominantResourceCalculator.java|   8 +
 .../yarn/util/resource/ResourceCalculator.java  |  23 +++
 .../hadoop/yarn/util/resource/Resources.java|   5 +
 .../scheduler/capacity/AbstractCSQueue.java |  23 +++
 .../scheduler/capacity/CSQueue.java |   7 +
 .../CapacitySchedulerConfiguration.java |  34 
 .../scheduler/capacity/LeafQueue.java   |  28 ++-
 .../scheduler/capacity/UserInfo.java|  15 +-
 .../scheduler/capacity/UsersManager.java| 172 +++
 .../scheduler/common/fica/FiCaSchedulerApp.java |   4 +-
 .../webapp/CapacitySchedulerPage.java   |   9 +-
 .../scheduler/capacity/TestLeafQueue.java   | 123 +
 .../src/site/markdown/CapacityScheduler.md  |   1 +
 14 files changed, 415 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d83e8712/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 524a049..bdf60bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -67,6 +67,12 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
+  public Resource divideAndCeil(Resource numerator, float denominator) {
+return Resources.createResource(
+divideAndCeil(numerator.getMemorySize(), denominator));
+  }
+
+  @Override
   public Resource normalize(Resource r, Resource minimumResource,
   Resource maximumResource, Resource stepFactor) {
 if (stepFactor.getMemorySize() == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d83e8712/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 9155ae3..7697e1d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -155,6 +155,14 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
+  public Resource divideAndCeil(Resource numerator, float denominator) {
+return Resources.createResource(
+divideAndCeil(numerator.getMemorySize(), denominator),
+divideAndCeil(numerator.getVirtualCores(), denominator)
+);
+  }
+
+  @Override
   public Resource normalize(Resource r, Resource minimumResource,
 Resource maximumResource, Resource stepFactor) {
 if (stepFactor.getMemorySize() == 0 || stepFactor.getVirtualCores() == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d83e8712/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
 
b/hado

hadoop git commit: YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

2017-07-13 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk b61ab8573 -> 945c0958b


YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid 
unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

Change-Id: I84ccd54200ccbaae23018ef320028e42b4c3509a


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/945c0958
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/945c0958
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/945c0958

Branch: refs/heads/trunk
Commit: 945c0958bb8df3dd9d5f1467f1216d2e6b0ee3d8
Parents: b61ab85
Author: Wangda Tan 
Authored: Thu Jul 13 10:30:15 2017 -0700
Committer: Wangda Tan 
Committed: Thu Jul 13 10:30:15 2017 -0700

--
 .../scheduler/activities/ActivitiesLogger.java  |  33 +++--
 .../scheduler/capacity/LeafQueue.java   |  83 ---
 .../capacity/TestCapacityScheduler.java | 146 ++-
 .../scheduler/capacity/TestLeafQueue.java   |  10 +-
 4 files changed, 231 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
index 3f8ed55..12aff02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
@@ -63,9 +63,14 @@ public class ActivitiesLogger {
 SchedulerApplicationAttempt application, Priority priority,
 String diagnostic) {
   String type = "app";
-  recordActivity(activitiesManager, node, application.getQueueName(),
-  application.getApplicationId().toString(), priority,
-  ActivityState.REJECTED, diagnostic, type);
+  if (activitiesManager == null) {
+return;
+  }
+  if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+recordActivity(activitiesManager, node, application.getQueueName(),
+application.getApplicationId().toString(), priority,
+ActivityState.REJECTED, diagnostic, type);
+  }
   finishSkippedAppAllocationRecording(activitiesManager,
   application.getApplicationId(), ActivityState.REJECTED, diagnostic);
 }
@@ -203,8 +208,13 @@ public class ActivitiesLogger {
 public static void recordQueueActivity(ActivitiesManager activitiesManager,
 SchedulerNode node, String parentQueueName, String queueName,
 ActivityState state, String diagnostic) {
-  recordActivity(activitiesManager, node, parentQueueName, queueName, null,
-  state, diagnostic, null);
+  if (activitiesManager == null) {
+return;
+  }
+  if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+recordActivity(activitiesManager, node, parentQueueName, queueName,
+null, state, diagnostic, null);
+  }
 }
   }
 
@@ -266,13 +276,10 @@ public class ActivitiesLogger {
   private static void recordActivity(ActivitiesManager activitiesManager,
   SchedulerNode node, String parentName, String childName,
   Priority priority, ActivityState state, String diagnostic, String type) {
-if (activitiesManager == null) {
-  return;
-}
-if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
-  activitiesManager.addSchedulingActivityForNode(node.getNodeID(),
-  parentName, childName, priority != null ? priority.toString() : null,
-  state, diagnostic, type);
-}
+
+activitiesManager.addSchedulingActivityForNode(node.getNodeID(), 
parentName,
+childName, priority != null ? priority.toString() : null, state,
+diagnostic, type);
+
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/h

hadoop git commit: HADOOP-14658. branch-2 compilation is broken in hadoop-azure Contributed by Sunil G.

2017-07-13 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cbb5f601f -> df69b13b4


HADOOP-14658. branch-2 compilation is broken in hadoop-azure
Contributed by Sunil G.

Signed-off-by: Steve Loughran 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df69b13b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df69b13b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df69b13b

Branch: refs/heads/branch-2
Commit: df69b13b4798eb69523d6e58b7619874ee412791
Parents: cbb5f60
Author: Steve Loughran 
Authored: Thu Jul 13 18:25:38 2017 +0100
Committer: Steve Loughran 
Committed: Thu Jul 13 18:25:38 2017 +0100

--
 .../java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df69b13b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
index 7c26e8a..2b22242 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.azure;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.azure.security.Constants;
 import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.http.Header;
 import org.apache.http.HttpResponse;
 import org.apache.http.HttpStatus;
@@ -190,7 +191,7 @@ public class WasbRemoteCallHelper {
 : urls[index]);
 } catch (IOException ioex) {
   String message =
-  "Encountered error while making remote call to " + String
+  "Encountered error while making remote call to " + StringUtils
   .join(",", urls) + " retried " + retry + " time(s).";
   LOG.error(message, ioex);
   throw new WasbRemoteCallException(message, ioex);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: YARN-2113. Add cross-user preemption within CapacityScheduler's leaf-queue. (Sunil G via wangda)

2017-07-13 Thread epayne
YARN-2113. Add cross-user preemption within CapacityScheduler's leaf-queue. 
(Sunil G via wangda)

Change-Id: I9b19f69788068be05b3295247cdd7b972f8a573c
(cherry picked from commit c583ab02c730be0a63d974039a78f2dc67dc2db6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbb5f601
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbb5f601
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbb5f601

Branch: refs/heads/branch-2
Commit: cbb5f601f124e6421fbdb01ecab43609d77b5c61
Parents: 956e83c
Author: Wangda Tan 
Authored: Mon May 22 14:26:13 2017 -0700
Committer: Eric Payne 
Committed: Thu Jul 13 10:44:07 2017 -0500

--
 .../resource/DefaultResourceCalculator.java |   5 +
 .../resource/DominantResourceCalculator.java|   5 +
 .../yarn/util/resource/ResourceCalculator.java  |   9 +
 .../hadoop/yarn/util/resource/Resources.java|   5 +
 .../CapacitySchedulerPreemptionContext.java |   5 +
 .../CapacitySchedulerPreemptionUtils.java   |   9 +-
 .../FifoIntraQueuePreemptionPlugin.java | 329 +--
 .../capacity/IntraQueueCandidatesSelector.java  | 110 ++-
 .../IntraQueuePreemptionComputePlugin.java  |  10 +-
 .../ProportionalCapacityPreemptionPolicy.java   |  24 +
 .../monitor/capacity/TempAppPerPartition.java   |   6 +-
 .../monitor/capacity/TempQueuePerPartition.java |  14 +
 .../monitor/capacity/TempUserPerPartition.java  |  88 ++
 .../CapacitySchedulerConfiguration.java |   8 +
 .../scheduler/capacity/LeafQueue.java   |  11 +-
 .../scheduler/capacity/UsersManager.java|  11 +-
 ...alCapacityPreemptionPolicyMockFramework.java |  89 +-
 ...ionalCapacityPreemptionPolicyIntraQueue.java |  30 +-
 ...cityPreemptionPolicyIntraQueueUserLimit.java | 899 +++
 ...pacityPreemptionPolicyIntraQueueWithDRF.java | 178 
 20 files changed, 1688 insertions(+), 157 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb5f601/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index ef7229c..524a049 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -121,4 +121,9 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   Resource smaller, Resource bigger) {
 return smaller.getMemorySize() <= bigger.getMemorySize();
   }
+
+  @Override
+  public boolean isAnyMajorResourceZero(Resource resource) {
+return resource.getMemorySize() == 0f;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb5f601/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 1457c28..9155ae3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -231,4 +231,9 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 return smaller.getMemorySize() <= bigger.getMemorySize()
 && smaller.getVirtualCores() <= bigger.getVirtualCores();
   }
+
+  @Override
+  public boolean isAnyMajorResourceZero(Resource resource) {
+return resource.getMemorySize() == 0f || resource.getVirtualCores() == 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb5f601/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apac

[1/2] hadoop git commit: YARN-2113. Add cross-user preemption within CapacityScheduler's leaf-queue. (Sunil G via wangda)

2017-07-13 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 956e83c10 -> cbb5f601f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb5f601/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java
new file mode 100644
index 0000000..7df52f9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java
@@ -0,0 +1,899 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
+
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test class for IntraQueuePreemption scenarios.
+ */
+public class TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit
+extends
+  ProportionalCapacityPreemptionPolicyMockFramework {
+  @Before
+  public void setup() {
+super.setup();
+conf.setBoolean(
+CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
+policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
+  }
+
+  @Test
+  public void testSimpleIntraQueuePreemptionWithTwoUsers()
+  throws IOException {
+/**
+ * Queue structure is:
+ *
+ * 
+ *   root
+ *|
+ *a
+ * 
+ *
+ * Scenario:
+ * Preconditions:
+ *   Queue total resources: 100
+ *   Minimum user limit percent: 50%
+ *   +--+--+--+-+
+ *   | APP  | USER  | PRIORITY | USED | PENDING |
+ *   +--+--+--+-+
+ *   | app1 | user1 | 1| 100  | 0   |
+ *   | app2 | user2 | 1| 0| 30  |
+ *   +--+--+--+-+
+ * Hence in queueA of 100, each user has a quota of 50. app1 of high 
priority
+ * has a demand of 0 and its already using 100. app2 from user2 has a 
demand
+ * of 30, and UL is 50. 30 would be preempted from app1.
+ */
+
+// Set max preemption limit as 50%.
+conf.setFloat(CapacitySchedulerConfiguration.
+INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+(float) 0.5);
+
+String labelsConfig = "=100,true;";
+String nodesConfig = // n1 has no label
+"n1= res=100";
+String queuesConfig =
+// guaranteed,max,used,pending,reserved
+"root(=[100 100 100 30 0]);" + // root
+"-a(=[100 100 100 30 0])"; // a
+
+String appsConfig =
+// queueName\t(priority,resource,host,expression,#repeat,reserved,pending)
+"a\t" // app1 in a
++ "(1,1,n1,,100,false,0,user1);" + // app1 a
+"a\t" // app2 in a
++ "(1,1,n1,,0,false,30,user2)";
+
+buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+policy.editSchedule();
+
+// app2 needs more resource and its well under its user-limit. Hence 
preempt
+// resources from app1.
+verify(mDisp, times(30)).handle(argThat(
+new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+getAppAttemptId(1))));
+  }
+
+  @Test
+  public void testNoIntraQueuePreemptionWithSingleUser()
+  throws IOException

hadoop git commit: HADOOP-14646. FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never runs. Contributed by Andras Bokor.

2017-07-13 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c9adca70e -> 7c8d4a1b3


HADOOP-14646. 
FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never 
runs. Contributed by Andras Bokor.

(cherry picked from commit b61ab8573eb2f224481118004f620fe9f18db74b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c8d4a1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c8d4a1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c8d4a1b

Branch: refs/heads/branch-2.8
Commit: 7c8d4a1b3fe07c26949a6b50e4b121e5cb3ac3b2
Parents: c9adca7
Author: Masatake Iwasaki 
Authored: Thu Jul 13 21:41:43 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jul 13 21:52:34 2017 +0900

--
 .../org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c8d4a1b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index f24a2f5..c835076 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -390,6 +390,7 @@ public abstract class FileContextMainOperationsBaseTest  {
 
   }
   
+  @Test
   public void testListStatusFilterWithSomeMatches() throws Exception {
 Path[] testDirs = {
 getTestRootPath(fc, TEST_DIR_AAA),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14646. FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never runs. Contributed by Andras Bokor.

2017-07-13 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 af8a227a6 -> 956e83c10


HADOOP-14646. 
FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never 
runs. Contributed by Andras Bokor.

(cherry picked from commit b61ab8573eb2f224481118004f620fe9f18db74b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/956e83c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/956e83c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/956e83c1

Branch: refs/heads/branch-2
Commit: 956e83c100161dbf692d8edac105322cfc0f1206
Parents: af8a227
Author: Masatake Iwasaki 
Authored: Thu Jul 13 21:41:43 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jul 13 21:43:10 2017 +0900

--
 .../org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/956e83c1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index a536e57..35ec4ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -391,6 +391,7 @@ public abstract class FileContextMainOperationsBaseTest  {
 
   }
   
+  @Test
   public void testListStatusFilterWithSomeMatches() throws Exception {
 Path[] testDirs = {
 getTestRootPath(fc, TEST_DIR_AAA),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14646. FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never runs. Contributed by Andras Bokor.

2017-07-13 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk cf0d0844d -> b61ab8573


HADOOP-14646. 
FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never 
runs. Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b61ab857
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b61ab857
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b61ab857

Branch: refs/heads/trunk
Commit: b61ab8573eb2f224481118004f620fe9f18db74b
Parents: cf0d084
Author: Masatake Iwasaki 
Authored: Thu Jul 13 21:41:43 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jul 13 21:41:43 2017 +0900

--
 .../org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61ab857/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index a536e57..35ec4ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -391,6 +391,7 @@ public abstract class FileContextMainOperationsBaseTest  {
 
   }
   
+  @Test
   public void testListStatusFilterWithSomeMatches() throws Exception {
 Path[] testDirs = {
 getTestRootPath(fc, TEST_DIR_AAA),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5731. Preemption calculation is not accurate when reserved containers are present in queue. Contributed by Wangda Tan.

2017-07-13 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk e15e2713e -> cf0d0844d


YARN-5731. Preemption calculation is not accurate when reserved containers are 
present in queue. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf0d0844
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf0d0844
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf0d0844

Branch: refs/heads/trunk
Commit: cf0d0844d6ae25d537391edb9b65fca05d1848e6
Parents: e15e271
Author: Sunil G 
Authored: Thu Jul 13 16:48:29 2017 +0530
Committer: Sunil G 
Committed: Thu Jul 13 16:48:29 2017 +0530

--
 .../capacity/FifoCandidatesSelector.java|  6 +-
 .../ProportionalCapacityPreemptionPolicy.java   | 22 -
 .../CapacitySchedulerPreemptionTestBase.java|  7 +-
 ...TestCapacitySchedulerSurgicalPreemption.java | 97 +++-
 4 files changed, 125 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index f4d7e92..f843db4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -43,12 +43,12 @@ public class FifoCandidatesSelector
   LogFactory.getLog(FifoCandidatesSelector.class);
   private PreemptableResourceCalculator preemptableAmountCalculator;
 
-  FifoCandidatesSelector(
-  CapacitySchedulerPreemptionContext preemptionContext) {
+  FifoCandidatesSelector(CapacitySchedulerPreemptionContext preemptionContext,
+  boolean includeReservedResource) {
 super(preemptionContext);
 
 preemptableAmountCalculator = new PreemptableResourceCalculator(
-preemptionContext, false);
+preemptionContext, includeReservedResource);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 76d6637..719d2eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -232,7 +232,27 @@ public class ProportionalCapacityPreemptionPolicy
 }
 
 // initialize candidates preemption selection policies
-candidatesSelectionPolicies.add(new FifoCandidatesSelector(this));
+// When select candidates for reserved containers is enabled, exclude 
reserved
+// resource in fifo policy (less aggressive). Otherwise include reserved
+// resource.
+//
+// Why doing this? In YARN-4390, we added 
preemption-based-on-reserved-container
+// Support. To reduce unnecessary preemption for large containers. We will
+// not include reserved resources while calculating ideal-allocation in
+// FifoCandidatesSelector.
+//
+// Changes in YARN-4390 will significantly reduce number of containers 
preempted
+// When cluster has heterogeneous container requests. (Please check test
+// report: 
https://issues.apache.org/ji

hadoop git commit: YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang Cang.

2017-07-13 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3926 6c7f012e5 -> 419bb55cf


YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang 
Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/419bb55c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/419bb55c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/419bb55c

Branch: refs/heads/YARN-3926
Commit: 419bb55cf56f8ee64a620c5bc9031349ef8c8d42
Parents: 6c7f012
Author: Sunil G 
Authored: Thu Jul 13 16:30:59 2017 +0530
Committer: Sunil G 
Committed: Thu Jul 13 16:30:59 2017 +0530

--
 .../apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/419bb55c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index a9abed9..7bc7f5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.api.records.impl.pb;
 
-import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -34,7 +33,10 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 
-import java.util.*;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Collections;
+
 
 @Private
 @Unstable


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12129. Ozone: SCM http server is not stopped with SCM#stop(). Contributed by Weiwei Yang.

2017-07-13 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 87154fcd6 -> 35c55ebd0


HDFS-12129. Ozone: SCM http server is not stopped with SCM#stop(). Contributed 
by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35c55ebd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35c55ebd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35c55ebd

Branch: refs/heads/HDFS-7240
Commit: 35c55ebd0e649849061d11b325f888fb3d9471d9
Parents: 87154fc
Author: Weiwei Yang 
Authored: Thu Jul 13 16:10:58 2017 +0800
Committer: Weiwei Yang 
Committed: Thu Jul 13 16:10:58 2017 +0800

--
 .../apache/hadoop/ozone/scm/StorageContainerManager.java  | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c55ebd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
index 5d60d96..4c10387 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
@@ -509,8 +509,16 @@ public class StorageContainerManager
   LOG.info("Stopping the RPC server for DataNodes");
   datanodeRpcServer.stop();
 } catch (Exception ex) {
-  LOG.error("Storage Container Manager httpServer stop failed.", ex);
+  LOG.error("Storage Container Manager datanodeRpcServer stop failed.", 
ex);
 }
+
+try {
+  LOG.info("Stopping Storage Container Manager HTTP server.");
+  httpServer.stop();
+} catch (Exception ex) {
+  LOG.error("Storage Container Manager HTTP server stop failed.", ex);
+}
+
 unregisterMXBean();
 IOUtils.closeQuietly(scmContainerManager);
 IOUtils.closeQuietly(scmBlockManager);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org