hadoop git commit: HDFS-11378. Verify multiple DataNodes can be decommissioned/maintenance at the same time. (Manoj Govindassamy via mingma)

2017-01-27 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 528bff9c4 -> 34f9ceab4


HDFS-11378. Verify multiple DataNodes can be decommissioned/maintenance at the 
same time. (Manoj Govindassamy via mingma)

(cherry picked from commit 312b36d113d83640b92c62fdd91ede74bd04c00f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34f9ceab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34f9ceab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34f9ceab

Branch: refs/heads/branch-2
Commit: 34f9ceab4a53007bba485b51fbd909dae5198148
Parents: 528bff9
Author: Ming Ma 
Authored: Fri Jan 27 16:16:42 2017 -0800
Committer: Ming Ma 
Committed: Fri Jan 27 16:17:51 2017 -0800

--
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java | 151 +--
 .../apache/hadoop/hdfs/TestDecommission.java|  43 ++
 .../hadoop/hdfs/TestMaintenanceState.java   |  36 +
 3 files changed, 186 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34f9ceab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
index 534c5e0..c4ccc67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
@@ -22,11 +22,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -149,10 +151,18 @@ public class AdminStatesBaseTest {
 }
   }
 
-  /*
-   * decommission the DN or put the DN into maintenance for datanodeUuid or one
-   * random node if datanodeUuid is null.
-   * And wait for the node to reach the given {@code waitForState}.
+  /**
+   * Decommission or perform Maintenance for DataNodes and wait for them to
+   * reach the expected state.
+   *
+   * @param nnIndex NameNode index
+   * @param datanodeUuid DataNode to decommission/maintenance, or a random
+   * DataNode if null
+   * @param maintenanceExpirationInMS Maintenance expiration time
+   * @param decommissionedNodes List of DataNodes already decommissioned
+   * @param waitForState Await for this state for datanodeUuid DataNode
+   * @return DatanodeInfo DataNode taken out of service
+   * @throws IOException
*/
   protected DatanodeInfo takeNodeOutofService(int nnIndex,
   String datanodeUuid, long maintenanceExpirationInMS,
@@ -162,48 +172,91 @@ public class AdminStatesBaseTest {
 maintenanceExpirationInMS, decommissionedNodes, null, waitForState);
   }
 
-  /*
-   * decommission the DN or put the DN to maintenance set by datanodeUuid
-   * Pick randome node if datanodeUuid == null
-   * wait for the node to reach the given {@code waitForState}.
+  /**
+   * Decommission or perform Maintenance for DataNodes and wait for them to
+   * reach the expected state.
+   *
+   * @param nnIndex NameNode index
+   * @param datanodeUuid DataNode to decommission/maintenance, or a random
+   * DataNode if null
+   * @param maintenanceExpirationInMS Maintenance expiration time
+   * @param decommissionedNodes List of DataNodes already decommissioned
+   * @param inMaintenanceNodes Map of DataNodes already entering/in maintenance
+   * @param waitForState Await for this state for datanodeUuid DataNode
+   * @return DatanodeInfo DataNode taken out of service
+   * @throws IOException
*/
   protected DatanodeInfo takeNodeOutofService(int nnIndex,
   String datanodeUuid, long maintenanceExpirationInMS,
   List decommissionedNodes,
   Map inMaintenanceNodes, AdminStates waitForState)
   throws IOException {
+return takeNodeOutofService(nnIndex, (datanodeUuid != null ?
+Lists.newArrayList(datanodeUuid) : null),
+maintenanceExpirationInMS, decommissionedNodes, inMaintenanceNodes,
+waitForState).get(0);
+  }
+
+  /**
+   * Decommission or perform Maintenance for DataNodes and wait for them to
+   * reach the expected state.
+   *
+   * @param nnIndex NameNode index
+   * @param dataNodeUuids DataNodes to decommission/maintenance, or a random
+   * DataNode if null
+   * @param maintenanceExpirationInMS Maintenance expiration time

hadoop git commit: HDFS-11378. Verify multiple DataNodes can be decommissioned/maintenance at the same time. (Manoj Govindassamy via mingma)

2017-01-27 Thread mingma
Repository: hadoop
Updated Branches:
  refs/heads/trunk ebd40056a -> 312b36d11


HDFS-11378. Verify multiple DataNodes can be decommissioned/maintenance at the 
same time. (Manoj Govindassamy via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312b36d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312b36d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312b36d1

Branch: refs/heads/trunk
Commit: 312b36d113d83640b92c62fdd91ede74bd04c00f
Parents: ebd4005
Author: Ming Ma 
Authored: Fri Jan 27 16:16:42 2017 -0800
Committer: Ming Ma 
Committed: Fri Jan 27 16:16:42 2017 -0800

--
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java | 151 +--
 .../apache/hadoop/hdfs/TestDecommission.java|  43 ++
 .../hadoop/hdfs/TestMaintenanceState.java   |  36 +
 3 files changed, 186 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/312b36d1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
index 0ed01f7..c0cef19 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
@@ -22,11 +22,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -149,10 +151,18 @@ public class AdminStatesBaseTest {
 }
   }
 
-  /*
-   * decommission the DN or put the DN into maintenance for datanodeUuid or one
-   * random node if datanodeUuid is null.
-   * And wait for the node to reach the given {@code waitForState}.
+  /**
+   * Decommission or perform Maintenance for DataNodes and wait for them to
+   * reach the expected state.
+   *
+   * @param nnIndex NameNode index
+   * @param datanodeUuid DataNode to decommission/maintenance, or a random
+   * DataNode if null
+   * @param maintenanceExpirationInMS Maintenance expiration time
+   * @param decommissionedNodes List of DataNodes already decommissioned
+   * @param waitForState Await for this state for datanodeUuid DataNode
+   * @return DatanodeInfo DataNode taken out of service
+   * @throws IOException
*/
   protected DatanodeInfo takeNodeOutofService(int nnIndex,
   String datanodeUuid, long maintenanceExpirationInMS,
@@ -162,48 +172,91 @@ public class AdminStatesBaseTest {
 maintenanceExpirationInMS, decommissionedNodes, null, waitForState);
   }
 
-  /*
-   * decommission the DN or put the DN to maintenance set by datanodeUuid
-   * Pick randome node if datanodeUuid == null
-   * wait for the node to reach the given {@code waitForState}.
+  /**
+   * Decommission or perform Maintenance for DataNodes and wait for them to
+   * reach the expected state.
+   *
+   * @param nnIndex NameNode index
+   * @param datanodeUuid DataNode to decommission/maintenance, or a random
+   * DataNode if null
+   * @param maintenanceExpirationInMS Maintenance expiration time
+   * @param decommissionedNodes List of DataNodes already decommissioned
+   * @param inMaintenanceNodes Map of DataNodes already entering/in maintenance
+   * @param waitForState Await for this state for datanodeUuid DataNode
+   * @return DatanodeInfo DataNode taken out of service
+   * @throws IOException
*/
   protected DatanodeInfo takeNodeOutofService(int nnIndex,
   String datanodeUuid, long maintenanceExpirationInMS,
   List decommissionedNodes,
   Map inMaintenanceNodes, AdminStates waitForState)
   throws IOException {
+return takeNodeOutofService(nnIndex, (datanodeUuid != null ?
+Lists.newArrayList(datanodeUuid) : null),
+maintenanceExpirationInMS, decommissionedNodes, inMaintenanceNodes,
+waitForState).get(0);
+  }
+
+  /**
+   * Decommission or perform Maintenance for DataNodes and wait for them to
+   * reach the expected state.
+   *
+   * @param nnIndex NameNode index
+   * @param dataNodeUuids DataNodes to decommission/maintenance, or a random
+   * DataNode if null
+   * @param maintenanceExpirationInMS Maintenance expiration time
+   * @param decommissionedNodes List of DataNodes already decommissioned

[24/50] [abbrv] hadoop git commit: YARN-5324. Stateless Federation router policies implementation. (Carlo Curino via Subru).

2017-01-27 Thread subru
YARN-5324. Stateless Federation router policies implementation. (Carlo Curino 
via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15ecaa40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15ecaa40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15ecaa40

Branch: refs/heads/YARN-2915
Commit: 15ecaa40a60cb118a468a1230c6c124a477957e6
Parents: d05c16a
Author: Subru Krishnan 
Authored: Thu Sep 22 17:06:57 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../policies/FederationPolicyConfigurator.java  |  91 ---
 .../FederationPolicyInitializationContext.java  |  11 +-
 .../policies/FederationPolicyManager.java   | 126 +
 .../policies/FederationPolicyWriter.java|  45 
 .../policies/dao/WeightedPolicyInfo.java| 253 +++
 .../federation/policies/dao/package-info.java   |  20 ++
 .../router/BaseWeightedRouterPolicy.java| 150 +++
 .../policies/router/LoadBasedRouterPolicy.java  | 109 
 .../policies/router/PriorityRouterPolicy.java   |  66 +
 .../router/UniformRandomRouterPolicy.java   |  85 +++
 .../router/WeightedRandomRouterPolicy.java  |  79 ++
 .../store/records/SubClusterIdInfo.java |  75 ++
 .../policies/BaseFederationPoliciesTest.java| 155 
 ...ionPolicyInitializationContextValidator.java |  17 +-
 .../router/TestLoadBasedRouterPolicy.java   | 109 
 .../router/TestPriorityRouterPolicy.java|  87 +++
 .../router/TestUniformRandomRouterPolicy.java   |  65 +
 .../router/TestWeightedRandomRouterPolicy.java  | 127 ++
 .../utils/FederationPoliciesTestUtil.java   |  82 +-
 19 files changed, 1604 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ecaa40/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
deleted file mode 100644
index fdc3857..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.federation.policies;
-
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
-
-
-import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
-
-import org.apache.hadoop.yarn.server.federation.policies.router
-.FederationRouterPolicy;
-
-/**
- * Implementors of this interface are capable to instantiate and (re)initalize
- * {@link FederationAMRMProxyPolicy} and {@link FederationRouterPolicy} based 
on
- * a {@link FederationPolicyInitializationContext}. The reason to bind these 
two
- * policies together is to make sure we remain consistent across the router and
- * amrmproxy policy decisions.
- */
-public interface FederationPolicyConfigurator {
-
-  /**
-   * If the current instance is compatible, this method returns the same
-   * instance of {@link FederationAMRMProxyPolicy} reinitialized with the
-   * current context, otherwise a new instance initialized with the current
-   * context is provided. If the instance is compatible with the current class
-   * the implementors should attempt to reinitalize (retaining state). To 
affect
-   * a complete policy reset oldInstance should be null.
-   *
-   * @p

[38/50] [abbrv] hadoop git commit: YARN-5872. Add AlwayReject policies for router and amrmproxy. (Carlo Curino via Subru).

2017-01-27 Thread subru
YARN-5872. Add AlwayReject policies for router and amrmproxy. (Carlo Curino via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08dc0958
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08dc0958
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08dc0958

Branch: refs/heads/YARN-2915
Commit: 08dc09581230ba595ce48fe7d3bc4eb2b6f98091
Parents: dc86175
Author: Subru Krishnan 
Authored: Tue Nov 22 18:37:30 2016 -0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../amrmproxy/RejectAMRMProxyPolicy.java| 67 +
 .../manager/RejectAllPolicyManager.java | 40 ++
 .../policies/router/RejectRouterPolicy.java | 66 +
 .../amrmproxy/TestRejectAMRMProxyPolicy.java| 78 
 .../manager/TestRejectAllPolicyManager.java | 40 ++
 .../policies/router/TestRejectRouterPolicy.java | 63 
 6 files changed, 354 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08dc0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
new file mode 100644
index 000..3783df6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+/**
+ * An implementation of the {@link FederationAMRMProxyPolicy} that simply
+ * rejects all requests. Useful to prevent apps from accessing any sub-cluster.
+ */
+public class RejectAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
+
+  private Set knownClusterIds = new HashSet<>();
+
+  @Override
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
+  throws FederationPolicyInitializationException {
+// overrides initialize to avoid weight checks that do no apply for
+// this policy.
+FederationPolicyInitializationContextValidator.validate(policyContext,
+this.getClass().getCanonicalName());
+setPolicyContext(policyContext);
+  }
+
+  @Override
+  public Map> splitResourceRequests(
+  List resourceRequests) throws YarnException {
+throw new FederationPolicyException("The policy configured for this queue "
++ "rejects all routing requests by construction.");
+  }
+
+  @Override
+  public void notifyOfResponse(SubClusterId subClusterId,
+  AllocateResponse response) throws YarnException {
+// This might be invoked for applications started with a previous policy,
+// do nothing for this policy.

[26/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-01-27 Thread subru
YARN-5676. Add a HashBasedRouterPolicy, and small policies and test 
refactoring. (Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fd588f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fd588f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fd588f5

Branch: refs/heads/YARN-2915
Commit: 3fd588f5253e8a7ce519202d2fa61d55f991a0ad
Parents: ace063a
Author: Subru Krishnan 
Authored: Tue Nov 22 15:02:22 2016 -0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +-
 .../policies/AbstractPolicyManager.java | 175 -
 .../policies/FederationPolicyManager.java   | 117 
 .../PriorityBroadcastPolicyManager.java |  66 ---
 .../federation/policies/RouterPolicyFacade.java |   1 +
 .../policies/UniformBroadcastPolicyManager.java |  56 --
 .../policies/WeightedLocalityPolicyManager.java |  67 ---
 .../policies/manager/AbstractPolicyManager.java | 190 +++
 .../manager/FederationPolicyManager.java| 118 
 .../manager/HashBroadcastPolicyManager.java |  38 
 .../manager/PriorityBroadcastPolicyManager.java |  66 +++
 .../manager/UniformBroadcastPolicyManager.java  |  44 +
 .../manager/WeightedLocalityPolicyManager.java  |  67 +++
 .../policies/manager/package-info.java  |  19 ++
 .../policies/router/AbstractRouterPolicy.java   |  19 ++
 .../policies/router/HashBasedRouterPolicy.java  |  81 
 .../policies/router/LoadBasedRouterPolicy.java  |   3 +
 .../policies/router/PriorityRouterPolicy.java   |   3 +
 .../router/UniformRandomRouterPolicy.java   |  10 +-
 .../router/WeightedRandomRouterPolicy.java  |   3 +
 .../policies/BaseFederationPoliciesTest.java|  17 +-
 .../policies/BasePolicyManagerTest.java | 108 ---
 ...ionPolicyInitializationContextValidator.java |   1 +
 .../TestPriorityBroadcastPolicyManager.java |  72 ---
 .../policies/TestRouterPolicyFacade.java|   2 +
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/manager/BasePolicyManagerTest.java | 104 ++
 .../TestHashBasedBroadcastPolicyManager.java|  40 
 .../TestPriorityBroadcastPolicyManager.java |  72 +++
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/router/BaseRouterPoliciesTest.java |  51 +
 .../router/TestHashBasedRouterPolicy.java   |  83 
 .../router/TestLoadBasedRouterPolicy.java   |   3 +-
 .../router/TestPriorityRouterPolicy.java|   3 +-
 .../router/TestUniformRandomRouterPolicy.java   |   3 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  15 +-
 38 files changed, 1160 insertions(+), 798 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd588f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e8e76d1..565c263 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2523,7 +2523,8 @@ public class YarnConfiguration extends Configuration {
   + "policy-manager";
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache"
-  + 
".hadoop.yarn.server.federation.policies.UniformBroadcastPolicyManager";
+  + ".hadoop.yarn.server.federation.policies"
+  + ".manager.UniformBroadcastPolicyManager";
 
   public static final String FEDERATION_POLICY_MANAGER_PARAMS =
   FEDERATION_PREFIX + "policy-manager-params";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd588f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/fed

[41/50] [abbrv] hadoop git commit: YARN-5390. Federation Subcluster Resolver. Contributed by Ellen Hui.

2017-01-27 Thread subru
YARN-5390. Federation Subcluster Resolver. Contributed by Ellen Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa3adb52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa3adb52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa3adb52

Branch: refs/heads/YARN-2915
Commit: fa3adb52ef08db2650e1bfb82751e869e0651fa5
Parents: c15afed
Author: Subru Krishnan 
Authored: Thu Aug 4 15:58:31 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 +
 .../src/main/resources/yarn-default.xml |   7 +
 .../hadoop-yarn-server-common/pom.xml   |  10 +
 .../resolver/AbstractSubClusterResolver.java|  67 +++
 .../resolver/DefaultSubClusterResolverImpl.java | 164 +
 .../federation/resolver/SubClusterResolver.java |  58 ++
 .../federation/resolver/package-info.java   |  17 ++
 .../resolver/TestDefaultSubClusterResolver.java | 184 +++
 .../src/test/resources/nodes|   4 +
 .../src/test/resources/nodes-malformed  |   3 +
 10 files changed, 522 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3adb52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7887fbc..122ab15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2479,6 +2479,14 @@ public class YarnConfiguration extends Configuration {
   public static final int DEFAULT_SHARED_CACHE_NM_UPLOADER_THREAD_COUNT = 20;
 
   
+  // Federation Configs
+  
+
+  public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
+  public static final String FEDERATION_MACHINE_LIST =
+  FEDERATION_PREFIX + "machine-list";
+
+  
   // Other Configs
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3adb52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 1e929a8..79e0e9c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2611,6 +2611,13 @@
   
 
   
+
+  Machine list file to be loaded by the FederationSubCluster Resolver
+
+yarn.federation.machine-list
+  
+
+  
 The interval that the yarn client library uses to poll the
 completion status of the asynchronous API of application client protocol.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3adb52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 9cc3cae..6d2fbef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -177,6 +177,16 @@
   
 
   
+  
+org.apache.rat
+apache-rat-plugin
+
+  
+src/test/resources/nodes
+src/test/resources/nodes-malformed
+  
+
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3adb52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
 
b/hadoop-yarn-project/hadoop-yarn/

[44/50] [abbrv] hadoop git commit: YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni Matteo Fumarola via Subru)

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5be22df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
new file mode 100644
index 000..13175ae
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
@@ -0,0 +1,1265 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.utils;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for FederationApplicationInputValidator,
+ * FederationMembershipInputValidator, and FederationPolicyInputValidator.
+ */
+public class TestFederationStateStoreInputValidator {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestFederationStateStoreInputValidator.class);
+
+  private static SubClusterId subClusterId;
+  private static String amRMServiceAddress;
+  private static String clientRMServiceAddress;
+  private static String rmAdminServiceAddress;
+  private static String rmWebServiceAddress;
+  private static int lastHeartBeat;
+  private static SubClusterState stateNew;
+  private static SubClusterState stateLost;
+  private static ApplicationId appId;
+  private static int lastStartTime;
+  private static String capability;
+  private static String queue;
+  private static String type;
+  private static ByteBuffer params;
+
+  private static SubClusterId subClusterIdInvalid;
+  private static SubClusterId subClusterIdNull;
+
+  private static int lastHeartBeatNegative;
+  private static int lastStartTimeNegative;
+
+  private static SubClusterState stateNull;
+  private static ApplicationId appIdNull;
+
+  private static String capabilityNull;
+  private static String capabilityEmpty;
+
+  private static String addressNull;
+  private static String addressEmpty;
+  private static String addressWrong;
+  private static String addressWrongPort;
+
+  private static String queueEmpty;
+  private static String q

[48/50] [abbrv] hadoop git commit: YARN-5300. Exclude generated federation protobuf sources from YARN Javadoc/findbugs build

2017-01-27 Thread subru
YARN-5300. Exclude generated federation protobuf sources from YARN 
Javadoc/findbugs build


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25014a32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25014a32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25014a32

Branch: refs/heads/YARN-2915
Commit: 25014a32808e8b43fa0aa6a869a27ab07f14a8f1
Parents: ebd4005
Author: Subru Krishnan 
Authored: Tue Jul 19 15:08:25 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml | 3 +++
 hadoop-yarn-project/hadoop-yarn/pom.xml  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25014a32/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index c090749..2f5451d 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -21,6 +21,9 @@
 
   
   
+
+  
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25014a32/hadoop-yarn-project/hadoop-yarn/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/pom.xml
index c43588a..99b8b5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -75,7 +75,7 @@
 org.apache.maven.plugins
 maven-javadoc-plugin
 
-  
org.apache.hadoop.yarn.proto
+  
org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] [abbrv] hadoop git commit: YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo Curino via Subru).

2017-01-27 Thread subru
YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo 
Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d05c16a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d05c16a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d05c16a6

Branch: refs/heads/YARN-2915
Commit: d05c16a69a60c49ab7f0c9a7a9c81a9eaa5a37b5
Parents: 05db3df
Author: Subru Krishnan 
Authored: Wed Sep 7 17:33:34 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../policies/ConfigurableFederationPolicy.java  |  44 +++
 .../policies/FederationPolicyConfigurator.java  |  91 +
 .../FederationPolicyInitializationContext.java  | 109 
 ...ionPolicyInitializationContextValidator.java |  82 
 .../policies/FederationPolicyWriter.java|  45 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  66 ++
 .../policies/amrmproxy/package-info.java|  20 +++
 .../exceptions/FederationPolicyException.java   |  33 +
 ...FederationPolicyInitializationException.java |  33 +
 .../NoActiveSubclustersException.java   |  27 
 .../exceptions/UnknownSubclusterException.java  |  28 
 .../policies/exceptions/package-info.java   |  20 +++
 .../federation/policies/package-info.java   |  20 +++
 .../policies/router/FederationRouterPolicy.java |  45 +++
 .../policies/router/package-info.java   |  20 +++
 ...ionPolicyInitializationContextValidator.java | 128 +++
 .../utils/FederationPoliciesTestUtil.java   |  83 
 17 files changed, 894 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d05c16a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
new file mode 100644
index 000..fd6ceea
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+
+/**
+ * This interface provides a general method to reinitialize a policy. The
+ * semantics are try-n-swap, so in case an exception is thrown the
+ * implementation must ensure the previous state and configuration are preserved.
+ */
+public interface ConfigurableFederationPolicy {
+
+  /**
+   * This method is invoked to initialize or update the configuration of
+   * policies. The implementor should provide try-n-swap semantics, and retain
+   * state if possible.
+   *
+   * @param federationPolicyInitializationContext the new context to provide to
+   *  implementor.
+   *
+   * @throws FederationPolicyInitializationException in case the initialization
+   * fails.
+   */
+  void reinitialize(
+  FederationPolicyInitializationContext
+  federationPolicyInitializationContext)
+  throws FederationPolicyInitializationException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d05c16a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
--

[49/50] [abbrv] hadoop git commit: YARN-5307. Federation Application State Store internal APIs

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be8b7e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
new file mode 100644
index 000..8b72a1e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.records.impl.pb;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterResponseProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterResponseProtoOrBuilder;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protocol buffer based implementation of
+ * {@link GetApplicationsHomeSubClusterResponse}.
+ */
+@Private
+@Unstable
+public class GetApplicationsHomeSubClusterResponsePBImpl
+extends GetApplicationsHomeSubClusterResponse {
+
+  private GetApplicationsHomeSubClusterResponseProto proto =
+  GetApplicationsHomeSubClusterResponseProto.getDefaultInstance();
+  private GetApplicationsHomeSubClusterResponseProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private List appsHomeSubCluster;
+
+  public GetApplicationsHomeSubClusterResponsePBImpl() {
+builder = GetApplicationsHomeSubClusterResponseProto.newBuilder();
+  }
+
+  public GetApplicationsHomeSubClusterResponsePBImpl(
+  GetApplicationsHomeSubClusterResponseProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public GetApplicationsHomeSubClusterResponseProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void mergeLocalToProto() {
+if (viaProto) {
+  maybeInitBuilder();
+}
+mergeLocalToBuilder();
+proto = builder.build();
+viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = GetApplicationsHomeSubClusterResponseProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+if (this.appsHomeSubCluster != null) {
+  addSubClustersInfoToProto();
+}
+  }
+
+  @Override
+  public int hashCode() {
+return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+if (other == null) {
+  return false;
+}
+if (other.getClass().isAssignableFrom(this.getClass())) {
+  return this.getProto().equals(this.getClass().cast(other).getProto());
+}
+return false;
+  }
+
+  @Override
+  public String toString() {
+return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public List getAppsHomeSubClusters() {
+initSubClustersInfoList();
+return appsHomeSubCluster;
+  }
+
+  @Override
+  public void setAppsHomeSubClusters(
+  List appsHomeSubClusters) {
+maybeInitBuilder();
+if (a

[36/50] [abbrv] hadoop git commit: YARN-3672. Create Facade for Federation State and Policy Store. Contributed by Subru Krishnan

2017-01-27 Thread subru
YARN-3672. Create Facade for Federation State and Policy Store. Contributed by 
Subru Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9047b7b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9047b7b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9047b7b2

Branch: refs/heads/YARN-2915
Commit: 9047b7b2b7ac968ffe0800525c0a4218a1bd9577
Parents: 4a3ef84
Author: Jian He 
Authored: Wed Aug 17 11:13:19 2016 +0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 hadoop-project/pom.xml  |  13 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../src/main/resources/yarn-default.xml |  20 +-
 .../hadoop-yarn-server-common/pom.xml   |  10 +
 .../utils/FederationStateStoreFacade.java   | 532 +++
 .../server/federation/utils/package-info.java   |  17 +
 .../utils/FederationStateStoreTestUtil.java | 149 ++
 .../utils/TestFederationStateStoreFacade.java   | 148 ++
 9 files changed, 905 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9047b7b2/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index d3579a1..f0fe2cf 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -93,6 +93,9 @@
 2.0.0-M21
 1.0.0-M33
 
+1.0.0
+3.0.3
+
 
 1.8
 
@@ -1245,6 +1248,16 @@
   kerb-simplekdc
   1.0.0-RC2
 
+
+  javax.cache
+  cache-api
+  ${jcache.version}
+
+
+  org.ehcache
+  ehcache
+  ${ehcache.version}
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9047b7b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 122ab15..425ed28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2483,6 +2483,19 @@ public class YarnConfiguration extends Configuration {
   
 
   public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
+
+  public static final String FEDERATION_STATESTORE_CLIENT_CLASS =
+  FEDERATION_PREFIX + "state-store.class";
+
+  public static final String DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS =
+  
"org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore";
+
+  public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
+  FEDERATION_PREFIX + "cache-ttl.secs";
+
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9047b7b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 3da4bab..bfc2534 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -68,6 +68,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
 
configurationPropsToSkipCompare.add(YarnConfiguration.CURATOR_LEADER_ELECTOR);
 
+// Federation default configs to be ignored
+configurationPropsToSkipCompare
+.add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
+
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"
 configurationPropsToSkipCompare.add(YarnConfiguration.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9

[30/50] [abbrv] hadoop git commit: YARN-3662. Federation Membership State Store internal APIs.

2017-01-27 Thread subru
YARN-3662. Federation Membership State Store internal APIs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a2dbfba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a2dbfba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a2dbfba

Branch: refs/heads/YARN-2915
Commit: 6a2dbfba7d04dd4337631a0305ffefac46b634df
Parents: 25014a3
Author: Subru Krishnan 
Authored: Fri Jul 29 16:53:40 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../hadoop-yarn-server-common/pom.xml   |   8 +
 .../store/FederationMembershipStateStore.java   | 126 +
 .../server/federation/store/package-info.java   |  17 ++
 .../store/records/GetSubClusterInfoRequest.java |  62 +
 .../records/GetSubClusterInfoResponse.java  |  62 +
 .../records/GetSubClustersInfoRequest.java  |  66 +
 .../records/GetSubClustersInfoResponse.java |  66 +
 .../records/SubClusterDeregisterRequest.java|  89 +++
 .../records/SubClusterDeregisterResponse.java   |  42 +++
 .../records/SubClusterHeartbeatRequest.java | 149 +++
 .../records/SubClusterHeartbeatResponse.java|  45 
 .../federation/store/records/SubClusterId.java  | 100 +++
 .../store/records/SubClusterInfo.java   | 263 ++
 .../records/SubClusterRegisterRequest.java  |  74 +
 .../records/SubClusterRegisterResponse.java |  44 +++
 .../store/records/SubClusterState.java  |  60 +
 .../impl/pb/GetSubClusterInfoRequestPBImpl.java | 125 +
 .../pb/GetSubClusterInfoResponsePBImpl.java | 134 ++
 .../pb/GetSubClustersInfoRequestPBImpl.java | 108 
 .../pb/GetSubClustersInfoResponsePBImpl.java| 184 +
 .../pb/SubClusterDeregisterRequestPBImpl.java   | 156 +++
 .../pb/SubClusterDeregisterResponsePBImpl.java  |  77 ++
 .../pb/SubClusterHeartbeatRequestPBImpl.java| 192 +
 .../pb/SubClusterHeartbeatResponsePBImpl.java   |  77 ++
 .../records/impl/pb/SubClusterIdPBImpl.java |  75 ++
 .../records/impl/pb/SubClusterInfoPBImpl.java   | 267 +++
 .../pb/SubClusterRegisterRequestPBImpl.java | 134 ++
 .../pb/SubClusterRegisterResponsePBImpl.java|  77 ++
 .../store/records/impl/pb/package-info.java |  17 ++
 .../federation/store/records/package-info.java  |  17 ++
 .../proto/yarn_server_federation_protos.proto   |  93 +++
 .../records/TestFederationProtocolRecords.java  | 133 +
 32 files changed, 3139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a2dbfba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index fc23af8..9cc3cae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -58,6 +58,13 @@
   org.apache.hadoop
   hadoop-yarn-common
 
+
+
+  org.apache.hadoop
+  hadoop-yarn-common
+  test-jar
+  test
+
 
 
   com.google.guava
@@ -146,6 +153,7 @@
   yarn_server_common_protos.proto
   yarn_server_common_service_protos.proto
   yarn_server_common_service_protos.proto
+  yarn_server_federation_protos.proto
   ResourceTracker.proto
   SCMUploader.proto
   collectornodemanager_protocol.proto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a2dbfba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
new file mode 100644
index 000..378eadc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreement

[34/50] [abbrv] hadoop git commit: YARN-3664. Federation PolicyStore internal APIs

2017-01-27 Thread subru
YARN-3664. Federation PolicyStore internal APIs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b040ef2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b040ef2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b040ef2

Branch: refs/heads/YARN-2915
Commit: 4b040ef21d1d5405f26923b2b8ffda480db4af69
Parents: 6be8b7e
Author: Subru Krishnan 
Authored: Fri Aug 5 12:34:58 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../federation/store/FederationPolicyStore.java |  76 
 ...SubClusterPoliciesConfigurationsRequest.java |  35 
 ...ubClusterPoliciesConfigurationsResponse.java |  66 +++
 ...GetSubClusterPolicyConfigurationRequest.java |  62 ++
 ...etSubClusterPolicyConfigurationResponse.java |  65 +++
 ...SetSubClusterPolicyConfigurationRequest.java |  79 
 ...etSubClusterPolicyConfigurationResponse.java |  36 
 .../records/SubClusterPolicyConfiguration.java  | 130 +
 ...sterPoliciesConfigurationsRequestPBImpl.java |  95 +
 ...terPoliciesConfigurationsResponsePBImpl.java | 191 +++
 ...ClusterPolicyConfigurationRequestPBImpl.java | 103 ++
 ...lusterPolicyConfigurationResponsePBImpl.java | 143 ++
 .../pb/GetSubClustersInfoResponsePBImpl.java|   4 +-
 ...ClusterPolicyConfigurationRequestPBImpl.java | 159 +++
 ...lusterPolicyConfigurationResponsePBImpl.java |  93 +
 .../pb/SubClusterPolicyConfigurationPBImpl.java | 121 
 .../proto/yarn_server_federation_protos.proto   |  28 +++
 .../records/TestFederationProtocolRecords.java  |  53 -
 18 files changed, 1536 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b040ef2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
new file mode 100644
index 000..9d9bd9b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse;
+
+/**
+ * The FederationPolicyStore provides a key-value interface to access the
+ * policies configured for the system. The key is a "queue" name, i.e., the
+ * system allows configuring a different policy for each queue in the system
+ * (though each policy can make dynamic run-time decisions on a 
per-job/per-task
+ * basis). The value is a {@code SubClusterPolicyConfiguration}, a serialized
+ * representation of the policy type and its parameters.
+ */
+@Private
+@Unstable
+p

[46/50] [abbrv] hadoop git commit: YARN-5408. Compose Federation membership/application/policy APIs into an uber FederationStateStore API. (Ellen Hui via Subru).

2017-01-27 Thread subru
YARN-5408. Compose Federation membership/application/policy APIs into an uber 
FederationStateStore API. (Ellen Hui via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15532084
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15532084
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15532084

Branch: refs/heads/YARN-2915
Commit: 15532084ee3f6270c8a0784e6d6d344496b49f17
Parents: 4b040ef
Author: Subru Krishnan 
Authored: Mon Aug 8 14:53:38 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 ...ederationApplicationHomeSubClusterStore.java | 18 ++
 .../store/FederationMembershipStateStore.java   | 14 +
 .../federation/store/FederationStateStore.java  | 64 
 .../store/impl/MemoryFederationStateStore.java  | 19 --
 .../impl/FederationStateStoreBaseTest.java  | 57 +
 .../impl/TestMemoryFederationStateStore.java| 21 +--
 6 files changed, 99 insertions(+), 94 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15532084/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
index 217ee2e..22bb88a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -30,7 +30,6 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHom
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse;
-import org.apache.hadoop.yarn.server.records.Version;
 
 /**
  * FederationApplicationHomeSubClusterStore maintains the state of all
@@ -50,15 +49,6 @@ import org.apache.hadoop.yarn.server.records.Version;
 public interface FederationApplicationHomeSubClusterStore {
 
   /**
-   * Get the {@link Version} of the underlying federation application state
-   * store.
-   *
-   * @return the {@link Version} of the underlying federation application state
-   * store
-   */
-  Version getApplicationStateStoreVersion();
-
-  /**
* Register the home {@code SubClusterId} of the newly submitted
* {@code ApplicationId}. Currently response is empty if the operation was
* successful, if not an exception reporting reason for a failure.
@@ -91,16 +81,16 @@ public interface FederationApplicationHomeSubClusterStore {
* {@code ApplicationId}.
*
* @param request contains the application queried
-   * @return {@code ApplicationHomeSubCluster} containing the application's
-   * home subcluster
+   * @return {@code ApplicationHomeSubCluster} containing the application's 
home
+   * subcluster
* @throws YarnException if the request is invalid/fails
*/
   GetApplicationHomeSubClusterResponse getApplicationHomeSubClusterMap(
   GetApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
-   * Get the {@code ApplicationHomeSubCluster} list representing the mapping
-   * of all submitted applications to it's home sub-cluster.
+   * Get the {@code ApplicationHomeSubCluster} list representing the mapping of
+   * all submitted applications to its home sub-cluster.
*
* @param request empty representing all applications
* @return the mapping of all submitted application to its home sub-cluster

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15532084/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipState

[11/50] [abbrv] hadoop git commit: HDFS-11369. Change exception message in StorageLocationChecker.

2017-01-27 Thread subru
HDFS-11369. Change exception message in StorageLocationChecker.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c1cc30b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c1cc30b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c1cc30b

Branch: refs/heads/YARN-2915
Commit: 7c1cc30b3c611ad2d0ae19ebaefd45f31a734e6c
Parents: 425a7e5
Author: Arpit Agarwal 
Authored: Thu Jan 26 09:12:38 2017 -0800
Committer: Arpit Agarwal 
Committed: Thu Jan 26 09:12:38 2017 -0800

--
 .../server/datanode/checker/StorageLocationChecker.java  | 11 ++-
 .../datanode/checker/TestStorageLocationChecker.java |  4 +++-
 2 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1cc30b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index 6e323e0..a0bffcd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -213,14 +213,15 @@ public class StorageLocationChecker {
 }
 
 if (failedLocations.size() > maxVolumeFailuresTolerated) {
-  throw new IOException(
-  "Too many failed volumes: " + failedLocations.size() +
-  ". The configuration allows for a maximum of " +
-  maxVolumeFailuresTolerated + " failed volumes.");
+  throw new DiskErrorException("Too many failed volumes - "
+  + "current valid volumes: " + goodLocations.size()
+  + ", volumes configured: " + dataDirs.size()
+  + ", volumes failed: " + failedLocations.size()
+  + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
 }
 
 if (goodLocations.size() == 0) {
-  throw new IOException("All directories in "
+  throw new DiskErrorException("All directories in "
   + DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
   + failedLocations);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1cc30b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
index 0fe22cb..169a1b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
@@ -109,7 +109,9 @@ public class TestStorageLocationChecker {
 conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
 
 thrown.expect(IOException.class);
-thrown.expectMessage("Too many failed volumes");
+thrown.expectMessage("Too many failed volumes - current valid volumes: 1,"
++ " volumes configured: 3, volumes failed: 2, volume failures"
++ " tolerated: 1");
 StorageLocationChecker checker =
 new StorageLocationChecker(conf, new FakeTimer());
 checker.check(conf, locations);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: YARN-6123. [YARN-5864] Add a test to make sure queues of orderingPolicy will be updated when childQueues is added or removed. Contributed by Wangda Tan.

2017-01-27 Thread subru
YARN-6123. [YARN-5864] Add a test to make sure queues of orderingPolicy will be 
updated when childQueues is added or removed. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/165f07f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/165f07f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/165f07f5

Branch: refs/heads/YARN-2915
Commit: 165f07f51a03137d2e73e39ed1cb48385d963f39
Parents: 2034315
Author: Sunil G 
Authored: Fri Jan 27 18:37:51 2017 +0530
Committer: Sunil G 
Committed: Fri Jan 27 18:37:51 2017 +0530

--
 .../scheduler/capacity/ParentQueue.java |  3 +
 .../PriorityUtilizationQueueOrderingPolicy.java |  6 ++
 ...TestCapacitySchedulerSurgicalPreemption.java |  4 +-
 .../scheduler/capacity/TestQueueParsing.java| 58 
 4 files changed, 69 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/165f07f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 75ab610..0e5b884 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -343,6 +343,9 @@ public class ParentQueue extends AbstractCSQueue {
   // Re-sort all queues
   childQueues.clear();
   childQueues.addAll(currentChildQueues.values());
+
+  // Make sure we notify QueueOrderingPolicy
+  queueOrderingPolicy.setQueues(childQueues);
 } finally {
   writeLock.unlock();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/165f07f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java
index fe60611..0544387 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java
@@ -18,6 +18,7 @@
 
 package 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang3.StringUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
@@ -183,4 +184,9 @@ public class PriorityUtilizationQueueOrderingPolicy 
implements QueueOrderingPoli
   return CapacitySchedulerConfiguration.QUEUE_UTILIZATION_ORDERING_POLICY;
 }
   }
+
+  @VisibleForTesting
+  public List getQueues() {
+return queues;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/165f07f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPree

[09/50] [abbrv] hadoop git commit: HADOOP-13989. Remove erroneous source jar option from hadoop-client shade configuration. Contributed by Joe Pallas.

2017-01-27 Thread subru
HADOOP-13989. Remove erroneous source jar option from hadoop-client shade 
configuration. Contributed by Joe Pallas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd59b9cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd59b9cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd59b9cc

Branch: refs/heads/YARN-2915
Commit: cd59b9ccab51376310484a6e3d9179bb52fccae1
Parents: ff02bdf
Author: Andrew Wang 
Authored: Wed Jan 25 15:40:45 2017 -0800
Committer: Andrew Wang 
Committed: Wed Jan 25 15:40:45 2017 -0800

--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 1 -
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 1 -
 2 files changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd59b9cc/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index d29ef8f..83d2748 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -575,7 +575,6 @@
   shade
 
 
-  true
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd59b9cc/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 0f3140f..cff3329 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -138,7 +138,6 @@
   shade
 
 
-  true
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni Matteo Fumarola via Subru)

2017-01-27 Thread subru
YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni 
Matteo Fumarola via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5be22df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5be22df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5be22df

Branch: refs/heads/YARN-2915
Commit: f5be22df86587aa8d0d1d61e37000c2db9cd3e27
Parents: 9047b7b
Author: Subru Krishnan 
Authored: Wed Aug 17 12:07:06 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../store/impl/MemoryFederationStateStore.java  |   30 +
 ...cationHomeSubClusterStoreInputValidator.java |  183 +++
 ...ationMembershipStateStoreInputValidator.java |  317 +
 .../FederationPolicyStoreInputValidator.java|  144 ++
 ...derationStateStoreInvalidInputException.java |   48 +
 .../federation/store/utils/package-info.java|   17 +
 .../impl/FederationStateStoreBaseTest.java  |6 +-
 .../TestFederationStateStoreInputValidator.java | 1265 ++
 8 files changed, 2007 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5be22df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index 8144435..6e564dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -57,6 +57,9 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegister
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationApplicationHomeSubClusterStoreInputValidator;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationMembershipStateStoreInputValidator;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator;
 import org.apache.hadoop.yarn.server.records.Version;
 import org.apache.hadoop.yarn.util.MonotonicClock;
 
@@ -88,6 +91,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public SubClusterRegisterResponse registerSubCluster(
   SubClusterRegisterRequest request) throws YarnException {
+FederationMembershipStateStoreInputValidator
+.validateSubClusterRegisterRequest(request);
 SubClusterInfo subClusterInfo = request.getSubClusterInfo();
 membership.put(subClusterInfo.getSubClusterId(), subClusterInfo);
 return SubClusterRegisterResponse.newInstance();
@@ -96,6 +101,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public SubClusterDeregisterResponse deregisterSubCluster(
   SubClusterDeregisterRequest request) throws YarnException {
+FederationMembershipStateStoreInputValidator
+.validateSubClusterDeregisterRequest(request);
 SubClusterInfo subClusterInfo = membership.get(request.getSubClusterId());
 if (subClusterInfo == null) {
   throw new YarnException(
@@ -111,6 +118,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   public SubClusterHeartbeatResponse subClusterHeartbeat(
   SubClusterHeartbeatRequest request) throws YarnException {
 
+FederationMembershipStateStoreInputValidator
+.validateSubClusterHeartbeatRequest(request);
 SubClusterId subClusterId = request.getSubClusterId();
 SubClusterInfo subClusterInfo = membership.get(subClusterId);
 
@@ -129,6 +138,9 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public GetSubClusterInfoResponse getSubCluster(
   GetSubClusterInfoRequest request) throws YarnException {
+
+FederationMembershipStateStoreInputValidator
+.validateGetSubClusterInfoRequest(request);
 SubClus

[03/50] [abbrv] hadoop git commit: HADOOP-13988. KMSClientProvider does not work with WebHDFS and Apache Knox w/ProxyUser. Contributed by Greg Senia and Xiaoyu Yao.

2017-01-27 Thread subru
HADOOP-13988. KMSClientProvider does not work with WebHDFS and Apache Knox 
w/ProxyUser. Contributed by Greg Senia and Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a46933e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a46933e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a46933e8

Branch: refs/heads/YARN-2915
Commit: a46933e8ce4c1715c11e3e3283bf0e8c2b53b837
Parents: 7fc3e68
Author: Xiaoyu Yao 
Authored: Wed Jan 25 13:26:50 2017 -0800
Committer: Xiaoyu Yao 
Committed: Wed Jan 25 13:33:06 2017 -0800

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a46933e8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index df6768d..ccc8968 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -1071,10 +1071,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 return dtService;
   }
 
-  private boolean currentUgiContainsKmsDt() throws IOException {
-// Add existing credentials from current UGI, since provider is cached.
-Credentials creds = UserGroupInformation.getCurrentUser().
-getCredentials();
+  private boolean containsKmsDt(UserGroupInformation ugi) throws IOException {
+// Add existing credentials from the UGI, since provider is cached.
+Credentials creds = ugi.getCredentials();
 if (!creds.getAllTokens().isEmpty()) {
   org.apache.hadoop.security.token.Token
   dToken = creds.getToken(getDelegationTokenService());
@@ -1096,11 +1095,15 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 if (currentUgi.getRealUser() != null) {
   // Use real user for proxy user
   actualUgi = currentUgi.getRealUser();
-} else if (!currentUgiContainsKmsDt() &&
-!currentUgi.hasKerberosCredentials()) {
+}
+
+if (!containsKmsDt(actualUgi) &&
+!actualUgi.hasKerberosCredentials()) {
   // Use login user for user that does not have either
   // Kerberos credential or KMS delegation token for KMS operations
-  actualUgi = currentUgi.getLoginUser();
+  LOG.debug("using loginUser no KMS Delegation Token "
+  + "no Kerberos Credentials");
+  actualUgi = UserGroupInformation.getLoginUser();
 }
 return actualUgi;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: YARN-5407. In-memory based implementation of the FederationApplicationStateStore/FederationPolicyStateStore. (Ellen Hui via Subru)

2017-01-27 Thread subru
YARN-5407. In-memory based implementation of the 
FederationApplicationStateStore/FederationPolicyStateStore. (Ellen Hui via 
Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82fca628
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82fca628
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82fca628

Branch: refs/heads/YARN-2915
Commit: 82fca6283c938d6d84399a33364135c7a20848a7
Parents: 1553208
Author: Subru Krishnan 
Authored: Tue Aug 9 16:07:55 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../store/impl/MemoryFederationStateStore.java  | 158 +++-
 ...SubClusterPoliciesConfigurationsRequest.java |   2 +-
 ...ubClusterPoliciesConfigurationsResponse.java |   2 +-
 ...GetSubClusterPolicyConfigurationRequest.java |   3 +-
 ...etSubClusterPolicyConfigurationResponse.java |   2 +-
 ...SetSubClusterPolicyConfigurationRequest.java |  20 +-
 ...etSubClusterPolicyConfigurationResponse.java |   2 +-
 .../records/SubClusterPolicyConfiguration.java  |  27 +-
 ...tApplicationHomeSubClusterRequestPBImpl.java |   4 +
 ...ClusterPolicyConfigurationRequestPBImpl.java |  17 -
 .../pb/SubClusterPolicyConfigurationPBImpl.java |  17 +
 .../proto/yarn_server_federation_protos.proto   |   8 +-
 .../impl/FederationStateStoreBaseTest.java  | 367 ++-
 .../impl/TestMemoryFederationStateStore.java|   4 +-
 14 files changed, 558 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82fca628/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index cea4ac2..a540dff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -20,35 +20,72 @@ package org.apache.hadoop.yarn.server.federation.store.impl;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
-import 
org.apache.hadoop.yarn.server.federation.store.FederationMembershipStateStore;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClus

[07/50] [abbrv] hadoop git commit: Add CHANGES, RELEASENOTES, and jdiff for 3.0.0-alpha2 release.

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff02bdfe/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/CHANGES.3.0.0-alpha2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/CHANGES.3.0.0-alpha2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/CHANGES.3.0.0-alpha2.md
new file mode 100644
index 000..ac46033
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/CHANGES.3.0.0-alpha2.md
@@ -0,0 +1,927 @@
+
+
+# "Apache Hadoop" Changelog
+
+## Release 3.0.0-alpha2 - 2017-01-20
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-13361](https://issues.apache.org/jira/browse/HADOOP-13361) | Modify 
hadoop\_verify\_user to be consistent with hadoop\_subcommand\_opts (ie more 
granularity) |  Major | scripts | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-6962](https://issues.apache.org/jira/browse/HDFS-6962) | ACL 
inheritance conflicts with umaskmode |  Critical | security | LINTE | John 
Zhuge |
+| [HADOOP-13341](https://issues.apache.org/jira/browse/HADOOP-13341) | 
Deprecate HADOOP\_SERVERNAME\_OPTS; replace with (command)\_(subcommand)\_OPTS 
|  Major | scripts | Allen Wittenauer | Allen Wittenauer |
+| [HADOOP-13588](https://issues.apache.org/jira/browse/HADOOP-13588) | 
ConfServlet should respect Accept request header |  Major | conf | Weiwei Yang 
| Weiwei Yang |
+| [HDFS-10636](https://issues.apache.org/jira/browse/HDFS-10636) | Modify 
ReplicaInfo to remove the assumption that replica metadata and data are stored 
in java.io.File. |  Major | datanode, fs | Virajith Jalaparti | Virajith 
Jalaparti |
+| [HADOOP-13218](https://issues.apache.org/jira/browse/HADOOP-13218) | Migrate 
other Hadoop side tests to prepare for removing WritableRPCEngine |  Major | 
test | Kai Zheng | Wei Zhou |
+| [HDFS-10877](https://issues.apache.org/jira/browse/HDFS-10877) | Make 
RemoteEditLogManifest.committedTxnId optional in Protocol Buffers |  Major | 
qjm | Sean Mackrory | Sean Mackrory |
+| [HADOOP-13681](https://issues.apache.org/jira/browse/HADOOP-13681) | Reduce 
Kafka dependencies in hadoop-kafka module |  Major | metrics | Grant Henke | 
Grant Henke |
+| [HADOOP-13678](https://issues.apache.org/jira/browse/HADOOP-13678) | Update 
jackson from 1.9.13 to 2.x in hadoop-tools |  Major | tools | Akira Ajisaka | 
Akira Ajisaka |
+| [MAPREDUCE-6776](https://issues.apache.org/jira/browse/MAPREDUCE-6776) | 
yarn.app.mapreduce.client.job.max-retries should have a more useful default |  
Major | client | Daniel Templeton | Miklos Szegedi |
+| [HADOOP-13699](https://issues.apache.org/jira/browse/HADOOP-13699) | 
Configuration does not substitute multiple references to the same var |  
Critical | conf | Andrew Wang | Andrew Wang |
+| [HDFS-10637](https://issues.apache.org/jira/browse/HDFS-10637) | 
Modifications to remove the assumption that FsVolumes are backed by 
java.io.File. |  Major | datanode, fs | Virajith Jalaparti | Virajith Jalaparti 
|
+| [HDFS-10916](https://issues.apache.org/jira/browse/HDFS-10916) | Switch from 
"raw" to "system" xattr namespace for erasure coding policy |  Major | 
erasure-coding | Andrew Wang | Andrew Wang |
+| [YARN-4464](https://issues.apache.org/jira/browse/YARN-4464) | Lower the 
default max applications stored in the RM and store |  Blocker | 
resourcemanager | KWON BYUNGCHANG | Daniel Templeton |
+| [HADOOP-13721](https://issues.apache.org/jira/browse/HADOOP-13721) | Remove 
stale method ViewFileSystem#getTrashCanLocation |  Minor | viewfs | Manoj 
Govindassamy | Manoj Govindassamy |
+| [HDFS-10957](https://issues.apache.org/jira/browse/HDFS-10957) | Retire BKJM 
from trunk |  Major | ha | Vinayakumar B | Vinayakumar B |
+| [HADOOP-13560](https://issues.apache.org/jira/browse/HADOOP-13560) | 
S3ABlockOutputStream to support huge (many GB) file writes |  Major | fs/s3 | 
Steve Loughran | Steve Loughran |
+| [MAPREDUCE-6791](https://issues.apache.org/jira/browse/MAPREDUCE-6791) | 
remove unnecessary dependency from hadoop-mapreduce-client-jobclient to 
hadoop-mapreduce-client-shuffle |  Minor | mrv2 | Haibo Chen | Haibo Chen |
+| [HADOOP-7352](https://issues.apache.org/jira/browse/HADOOP-7352) | 
FileSystem#listStatus should throw IOE upon access error |  Major | fs | Matt 
Foley | John Zhuge |
+| [HADOOP-13693](https://issues.apache.org/jira/browse/HADOOP-13693) | Remove 
the message about HTTP OPTIONS in SPNEGO initialization message from kms audit 
log |  Minor | kms | Xiao Chen | Xiao Chen |
+| [YARN-5388](https://issues.apache.org/jira/browse/YARN-5388) | Deprecate and 
remove DockerContainerExecutor |  Critical | nodemanager | Daniel Templeton | 
Daniel Templeton |
+| [YARN-3732](https://issues.apache.org/jira/browse/YARN-3732) | Change 
NodeHeartbeatResponse.java a

[47/50] [abbrv] hadoop git commit: YARN-3671. Integrate Federation services with ResourceManager. Contributed by Subru Krishnan

2017-01-27 Thread subru
YARN-3671. Integrate Federation services with ResourceManager. Contributed by 
Subru Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a4d38d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a4d38d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a4d38d9

Branch: refs/heads/YARN-2915
Commit: 0a4d38d9473707b57b474be2dfd2610e5d907906
Parents: b56fbdd
Author: Jian He 
Authored: Tue Aug 30 12:20:52 2016 +0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  11 +-
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +-
 .../failover/FederationProxyProviderUtil.java   |   2 +-
 .../FederationRMFailoverProxyProvider.java  |   4 +-
 ...ationMembershipStateStoreInputValidator.java |   7 +-
 .../TestFederationStateStoreInputValidator.java |  10 +-
 .../server/resourcemanager/ResourceManager.java |  26 ++
 .../FederationStateStoreHeartbeat.java  | 108 +++
 .../federation/FederationStateStoreService.java | 304 +++
 .../federation/package-info.java|  17 ++
 .../webapp/dao/ClusterMetricsInfo.java  |   5 +-
 .../TestFederationRMStateStoreService.java  | 170 +++
 12 files changed, 648 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a4d38d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bb6bcd1..402b37a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2491,9 +2491,6 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_PREFIX + "failover.enabled";
   public static final boolean DEFAULT_FEDERATION_FAILOVER_ENABLED = true;
 
-  public static final String FEDERATION_SUBCLUSTER_ID =
-  FEDERATION_PREFIX + "sub-cluster.id";
-
   public static final String FEDERATION_STATESTORE_CLIENT_CLASS =
   FEDERATION_PREFIX + "state-store.class";
 
@@ -2506,6 +2503,14 @@ public class YarnConfiguration extends Configuration {
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
 
+  public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
+
+  // 5 minutes
+  public static final int
+  DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  5 * 60;
+
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a4d38d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index c4d8f38..5e0876f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -72,9 +72,9 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
 configurationPropsToSkipCompare
-.add(YarnConfiguration.FEDERATION_SUBCLUSTER_ID);
-configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a4d38d9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
--
diff --git 
a/hadoop

[14/50] [abbrv] hadoop git commit: MAPREDUCE-6829. Add peak memory usage counter for each task. (Miklos Szegedi via kasha)

2017-01-27 Thread subru
MAPREDUCE-6829. Add peak memory usage counter for each task. (Miklos Szegedi 
via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c65f884f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c65f884f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c65f884f

Branch: refs/heads/YARN-2915
Commit: c65f884fc7e08118524f8c88737119d8196b4c1b
Parents: 44606aa
Author: Karthik Kambatla 
Authored: Thu Jan 26 11:08:13 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Jan 26 11:08:13 2017 -0800

--
 .../java/org/apache/hadoop/mapred/Task.java |  24 ++-
 .../apache/hadoop/mapreduce/TaskCounter.java|   8 +-
 .../counters/FrameworkCounterGroup.java |   6 +-
 .../hadoop/mapreduce/TaskCounter.properties |   4 +
 .../org/apache/hadoop/mapred/TestCounters.java  |  31 ++-
 .../apache/hadoop/mapred/TestJobCounters.java   | 188 +++
 6 files changed, 256 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c65f884f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index 119d6a7..c1ae0ab 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -110,7 +110,11 @@ abstract public class Task implements Writable, 
Configurable {
 CPU_MILLISECONDS,
 PHYSICAL_MEMORY_BYTES,
 VIRTUAL_MEMORY_BYTES,
-COMMITTED_HEAP_BYTES
+COMMITTED_HEAP_BYTES,
+MAP_PHYSICAL_MEMORY_BYTES_MAX,
+MAP_VIRTUAL_MEMORY_BYTES_MAX,
+REDUCE_PHYSICAL_MEMORY_BYTES_MAX,
+REDUCE_VIRTUAL_MEMORY_BYTES_MAX
   }
 
   /**
@@ -964,6 +968,24 @@ abstract public class Task implements Writable, 
Configurable {
 if (vMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
   counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
 }
+
+if (pMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
+  TaskCounter counter = isMapTask() ?
+  TaskCounter.MAP_PHYSICAL_MEMORY_BYTES_MAX :
+  TaskCounter.REDUCE_PHYSICAL_MEMORY_BYTES_MAX;
+  Counters.Counter pMemCounter =
+  counters.findCounter(counter);
+  pMemCounter.setValue(Math.max(pMemCounter.getValue(), pMem));
+}
+
+if (vMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
+  TaskCounter counter = isMapTask() ?
+  TaskCounter.MAP_VIRTUAL_MEMORY_BYTES_MAX :
+  TaskCounter.REDUCE_VIRTUAL_MEMORY_BYTES_MAX;
+  Counters.Counter vMemCounter =
+  counters.findCounter(counter);
+  vMemCounter.setValue(Math.max(vMemCounter.getValue(), vMem));
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c65f884f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
index 42ef067..0fab96c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCounter.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public enum TaskCounter {
-  MAP_INPUT_RECORDS, 
+  MAP_INPUT_RECORDS,
   MAP_OUTPUT_RECORDS,
   MAP_SKIPPED_RECORDS,
   MAP_OUTPUT_BYTES,
@@ -47,5 +47,9 @@ public enum TaskCounter {
   CPU_MILLISECONDS,
   PHYSICAL_MEMORY_BYTES,
   VIRTUAL_MEMORY_BYTES,
-  COMMITTED_HEAP_BYTES
+  COMMITTED_HEAP_BYTES,
+  MAP_PHYSICAL_MEMORY_BYTES_MAX,
+  MAP_VIRTUAL_MEMORY_BYTES_MAX,
+  REDUCE_PHYSICAL_MEMORY_BYTES_MAX,
+  REDUCE_VIRTUAL_MEMORY_BYTES_MAX;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c65f884f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counter

[35/50] [abbrv] hadoop git commit: YARN-5905. Update the RM webapp host that is reported as part of Federation membership to current primary RM's IP.

2017-01-27 Thread subru
YARN-5905. Update the RM webapp host that is reported as part of Federation 
membership to current primary RM's IP.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc861755
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc861755
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc861755

Branch: refs/heads/YARN-2915
Commit: dc86175518cb15276e6e7f6878f6a3089ef4494e
Parents: 3fd588f
Author: Subru Krishnan 
Authored: Tue Nov 22 18:30:40 2016 -0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../federation/FederationStateStoreService.java  |  4 ++--
 .../federation/TestFederationRMStateStoreService.java| 11 ++-
 2 files changed, 12 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc861755/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
index 9a01d7e..530184f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
@@ -177,8 +177,8 @@ public class FederationStateStoreService extends 
AbstractService
 config.getSocketAddr(YarnConfiguration.RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_PORT));
-String webAppAddress =
-WebAppUtils.getResolvedRemoteRMWebAppURLWithoutScheme(config);
+String webAppAddress = getServiceAddress(NetUtils
+.createSocketAddr(WebAppUtils.getRMWebAppURLWithScheme(config)));
 
 SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId,
 amRMAddress, clientRMAddress, rmAdminAddress, webAppAddress,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc861755/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 30f69b5..d92a793 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -19,6 +19,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.federation;
 
 import java.io.IOException;
 import java.io.StringReader;
+import java.net.UnknownHostException;
 
 import javax.xml.bind.JAXBException;
 
@@ -157,12 +158,20 @@ public class TestFederationRMStateStoreService {
   }
 
   private String checkSubClusterInfo(SubClusterState state)
-  throws YarnException {
+  throws YarnException, UnknownHostException {
 Assert.assertNotNull(stateStore.getSubCluster(request));
 SubClusterInfo response =
 stateStore.getSubCluster(request).getSubClusterInfo();
 Assert.assertEquals(state, response.getState());
 Assert.assertTrue(response.getLastHeartBeat() >= lastHearbeatTS);
+String expectedAddress =
+(response.getClientRMServiceAddress().split(":"))[0];
+Assert.assertEquals(expectedAddress,
+(response.getAMRMServiceAddress().split(":"))[0]);
+Assert.assertEquals(expectedAddress,
+(response.getRMAdminServiceAddress().split(":"))[0]);
+Assert.assertEquals(expectedAddress,
+(respon

[31/50] [abbrv] hadoop git commit: YARN-5612. Return SubClusterId in FederationStateStoreFacade#addApplicationHomeSubCluster for Router Failover. (Giovanni Matteo Fumarola via Subru).

2017-01-27 Thread subru
YARN-5612. Return SubClusterId in 
FederationStateStoreFacade#addApplicationHomeSubCluster for Router Failover. 
(Giovanni Matteo Fumarola via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bd6b12e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bd6b12e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bd6b12e

Branch: refs/heads/YARN-2915
Commit: 5bd6b12e11383da7fa328438e4aa160c11aa32af
Parents: 0a4d38d
Author: Subru Krishnan 
Authored: Thu Sep 1 13:55:54 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../utils/FederationStateStoreFacade.java   | 11 ---
 .../utils/TestFederationStateStoreFacade.java   | 30 
 2 files changed, 37 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd6b12e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index f1c8218..66a0b60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
@@ -298,13 +299,15 @@ public final class FederationStateStoreFacade {
*
* @param appHomeSubCluster the mapping of the application to its home
*  sub-cluster
+   * @return the stored Subcluster from StateStore
* @throws YarnException if the call to the state store is unsuccessful
*/
-  public void addApplicationHomeSubCluster(
+  public SubClusterId addApplicationHomeSubCluster(
   ApplicationHomeSubCluster appHomeSubCluster) throws YarnException {
-stateStore.addApplicationHomeSubCluster(
-AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
-return;
+AddApplicationHomeSubClusterResponse response =
+stateStore.addApplicationHomeSubCluster(
+
AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
+return response.getHomeSubCluster();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bd6b12e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
index 53f4f84..d46bef0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import 
org.apache.hadoop.yarn.server.feder

[27/50] [abbrv] hadoop git commit: YARN-3673. Create a FailoverProxy for Federation services. Contributed by Subru Krishnan

2017-01-27 Thread subru
YARN-3673. Create a FailoverProxy for Federation services. Contributed by Subru 
Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b56fbdd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b56fbdd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b56fbdd3

Branch: refs/heads/YARN-2915
Commit: b56fbdd321adc4c0085425b15e29129819fc1eb8
Parents: f5be22d
Author: Jian He 
Authored: Mon Aug 22 14:43:07 2016 +0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../org/apache/hadoop/yarn/conf/HAUtil.java |  30 ++-
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../TestFederationRMFailoverProxyProvider.java  | 154 ++
 .../hadoop/yarn/client/ClientRMProxy.java   |   4 +-
 .../org/apache/hadoop/yarn/client/RMProxy.java  |  23 +-
 .../src/main/resources/yarn-default.xml |   7 +
 .../hadoop-yarn-server-common/pom.xml   |   2 -
 .../hadoop/yarn/server/api/ServerRMProxy.java   |   4 +-
 .../failover/FederationProxyProviderUtil.java   | 163 ++
 .../FederationRMFailoverProxyProvider.java  | 211 +++
 .../federation/failover/package-info.java   |  17 ++
 12 files changed, 613 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b56fbdd3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
index e4948e7..942b08a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.yarn.conf;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -27,8 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
-import java.net.InetSocketAddress;
-import java.util.Collection;
+import com.google.common.annotations.VisibleForTesting;
 
 @InterfaceAudience.Private
 public class HAUtil {
@@ -44,6 +45,29 @@ public class HAUtil {
   }
 
   /**
+   * Returns true if Federation is configured.
+   *
+   * @param conf Configuration
+   * @return true if federation is configured in the configuration; else false.
+   */
+  public static boolean isFederationEnabled(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
+YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
+  }
+
+  /**
+   * Returns true if RM failover is enabled in a Federation setting.
+   *
+   * @param conf Configuration
+   * @return if RM failover is enabled in conjunction with Federation in the
+   * configuration; else false.
+   */
+  public static boolean isFederationFailoverEnabled(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED,
+YarnConfiguration.DEFAULT_FEDERATION_FAILOVER_ENABLED);
+  }
+
+  /**
* Returns true if Resource Manager HA is configured.
*
* @param conf Configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b56fbdd3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 425ed28..bb6bcd1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2484,6 +2484,16 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
 
+  public static final String FEDERATION_ENABLED = FEDERATION_PREFIX + 
"enabled";
+  public static final boolean DEFAULT_FEDERATION_ENABLED = false;
+
+  public static final String FEDERATION_FAILOVER

[42/50] [abbrv] hadoop git commit: YARN-5406. In-memory based implementation of the FederationMembershipStateStore. Contributed by Ellen Hui.

2017-01-27 Thread subru
YARN-5406. In-memory based implementation of the 
FederationMembershipStateStore. Contributed by Ellen Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c15afed7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c15afed7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c15afed7

Branch: refs/heads/YARN-2915
Commit: c15afed76e7eef97cef78e4e693a8802cf5d3c7a
Parents: 6a2dbfb
Author: Subru Krishnan 
Authored: Thu Aug 4 15:54:38 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../store/impl/MemoryFederationStateStore.java  | 138 
 .../federation/store/impl/package-info.java |  17 ++
 .../records/GetSubClustersInfoRequest.java  |   4 +
 .../store/records/SubClusterState.java  |   4 +
 .../impl/FederationStateStoreBaseTest.java  | 221 +++
 .../impl/TestMemoryFederationStateStore.java|  49 
 6 files changed, 433 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c15afed7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
new file mode 100644
index 000..7fdc4a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import 
org.apache.hadoop.yarn.server.federation.store.FederationMembershipStateStore;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.util.MonotonicClock;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * In-memory implementation of FederationMembershipStateStore.
+ */
+public class MemoryFederationStateStore
+implements FederationMembershipStateStore {
+
+  private final Map membership =
+  new ConcurrentHashMap();
+  private final MonotonicClock clock = new MonotonicClock();
+
+  @Override
+  public Version getMembershipStateStoreVersion() {
+return null;
+  }
+
+  @Override
+  public SubClusterRegisterResponse registerSubCluster(
+  SubClusterRegisterRequest request) throws YarnEx

[23/50] [abbrv] hadoop git commit: YARN-5324. Stateless Federation router policies implementation. (Carlo Curino via Subru).

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ecaa40/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
index 8c2115b..f901329 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
@@ -19,13 +19,20 @@ package org.apache.hadoop.yarn.server.federation.utils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.ConfigurableFederationPolicy;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl;
 import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
-import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.*;
 
 import java.net.URL;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
@@ -41,6 +48,41 @@ public final class FederationPoliciesTestUtil {
 // disabled.
   }
 
+
+  public static void initializePolicyContext(
+  FederationPolicyInitializationContext fpc, ConfigurableFederationPolicy
+  policy, WeightedPolicyInfo policyInfo,
+  Map activeSubclusters)
+  throws YarnException {
+ByteBuffer buf = policyInfo.toByteBuffer();
+fpc.setSubClusterPolicyConfiguration(SubClusterPolicyConfiguration
+.newInstance("queue1", policy.getClass().getCanonicalName(), buf));
+FederationStateStoreFacade facade = FederationStateStoreFacade
+.getInstance();
+FederationStateStore fss = mock(FederationStateStore.class);
+
+if (activeSubclusters == null) {
+  activeSubclusters = new HashMap();
+}
+GetSubClustersInfoResponse response = GetSubClustersInfoResponse
+.newInstance(new 
ArrayList(activeSubclusters.values()));
+
+when(fss.getSubClusters(any())).thenReturn(response);
+facade.reinitialize(fss, new Configuration());
+fpc.setFederationStateStoreFacade(facade);
+policy.reinitialize(fpc);
+  }
+
+  public static void initializePolicyContext(
+  ConfigurableFederationPolicy policy,
+  WeightedPolicyInfo policyInfo, Map activeSubclusters) throws YarnException {
+FederationPolicyInitializationContext context =
+new FederationPolicyInitializationContext(null, initResolver(),
+initFacade());
+initializePolicyContext(context, policy, policyInfo, activeSubclusters);
+  }
+
   /**
* Initialize a {@link SubClusterResolver}.
*
@@ -66,18 +108,52 @@ public final class FederationPoliciesTestUtil {
* Initialize a main-memory {@link FederationStateStoreFacade} used for
* testing, with a mock resolver.
*
+   * @param subClusterInfos the list of subclusters to be served on
+   *getSubClusters invocations.
+   *
* @return the facade.
*
* @throws YarnException in case the initialization is not successful.
*/
-  public static FederationStateStoreFacade initFacade() throws YarnException {
+
+  public static FederationStateStoreFacade initFacade(
+  List subClusterInfos, SubClusterPolicyConfiguration
+  policyConfiguration) throws YarnException {
 FederationStateStoreFacade goodFacade = FederationStateStoreFacade
 .getInstance();
 FederationStateStore fss = mock(FederationStateStore.class);
 GetSubClustersInfoResponse response = GetSubClustersInfoResponse
-.newInstance(new ArrayList<>());
+.newInstance(subClusterInfos);
 when(fss.getSubClusters(any())).thenReturn(response);
+
+List configurations = new ArrayList<>();
+configurations.add(policyConfiguration);
+
+GetSubClusterPoliciesConfigurationsResponse policiesResponse =
+   

[04/50] [abbrv] hadoop git commit: YARN-5641. Localizer leaves behind tarballs after container is complete. Contributed by Eric Badger

2017-01-27 Thread subru
YARN-5641. Localizer leaves behind tarballs after container is complete. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e19f758
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e19f758
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e19f758

Branch: refs/heads/YARN-2915
Commit: 9e19f758c1950cbcfcd1969461a8a910efca0767
Parents: a46933e
Author: Jason Lowe 
Authored: Wed Jan 25 21:41:43 2017 +
Committer: Jason Lowe 
Committed: Wed Jan 25 21:41:43 2017 +

--
 .../main/java/org/apache/hadoop/util/Shell.java |  52 ++-
 .../java/org/apache/hadoop/util/TestShell.java  |   4 +-
 .../localizer/ContainerLocalizer.java   |  53 +++-
 .../localizer/TestContainerLocalizer.java   | 317 +++
 4 files changed, 348 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e19f758/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 83877b7..ca59b0e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -27,7 +27,9 @@ import java.io.InterruptedIOException;
 import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.WeakHashMap;
@@ -50,8 +52,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public abstract class Shell {
-  private static final Map  CHILD_PROCESSES =
-  Collections.synchronizedMap(new WeakHashMap());
+  private static final Map CHILD_SHELLS =
+  Collections.synchronizedMap(new WeakHashMap());
   public static final Logger LOG = LoggerFactory.getLogger(Shell.class);
 
   /**
@@ -820,6 +822,7 @@ public abstract class Shell {
   private File dir;
   private Process process; // sub process used to execute the command
   private int exitCode;
+  private Thread waitingThread;
 
   /** Flag to indicate whether or not the script has finished executing. */
   private final AtomicBoolean completed = new AtomicBoolean(false);
@@ -920,7 +923,9 @@ public abstract class Shell {
 } else {
   process = builder.start();
 }
-CHILD_PROCESSES.put(process, null);
+
+waitingThread = Thread.currentThread();
+CHILD_SHELLS.put(this, null);
 
 if (timeOutInterval > 0) {
   timeOutTimer = new Timer("Shell command timeout");
@@ -1017,7 +1022,8 @@ public abstract class Shell {
 LOG.warn("Error while closing the error stream", ioe);
   }
   process.destroy();
-  CHILD_PROCESSES.remove(process);
+  waitingThread = null;
+  CHILD_SHELLS.remove(this);
   lastTime = Time.monotonicNow();
 }
   }
@@ -1065,6 +1071,15 @@ public abstract class Shell {
 return exitCode;
   }
 
+  /** Get the thread that is waiting on this instance of Shell.
+   * @return the thread that ran runCommand() that spawned this shell,
+   * or null if no thread is waiting for this shell to complete
+   */
+  public Thread getWaitingThread() {
+return waitingThread;
+  }
+
+
   /**
* This is an IOException with exit code added.
*/
@@ -1318,20 +1333,27 @@ public abstract class Shell {
   }
 
   /**
-   * Static method to destroy all running Shell processes
-   * Iterates through a list of all currently running Shell
-   * processes and destroys them one by one. This method is thread safe and
-   * is intended to be used in a shutdown hook.
+   * Static method to destroy all running Shell processes.
+   * Iterates through a map of all currently running Shell
+   * processes and destroys them one by one. This method is thread-safe.
*/
-  public static void destroyAllProcesses() {
-synchronized (CHILD_PROCESSES) {
-  for (Process key : CHILD_PROCESSES.keySet()) {
-Process process = key;
-if (key != null) {
-  process.destroy();
+  public static void destroyAllShellProcesses() {
+synchronized (CHILD_SHELLS) {
+  for (Shell shell : CHILD_SHELLS.keySet()) {
+if (shell.getProcess() != null) {
+  shell.getProcess().destroy();
 }
   }
-  CHILD_PROCESSES.clear();
+  CHILD_SHELLS.clear();
+}
+  }
+
+  /**
+   * Static method to return a Set of all Shell objects.
+   */
+  public static Set getAllShells() {
+synchronized (CHILD_SHELLS) {
+  return new

[50/50] [abbrv] hadoop git commit: YARN-5307. Federation Application State Store internal APIs

2017-01-27 Thread subru
YARN-5307. Federation Application State Store internal APIs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6be8b7e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6be8b7e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6be8b7e9

Branch: refs/heads/YARN-2915
Commit: 6be8b7e946b30b4daafb1f71094a9fe7c423c4ad
Parents: fa3adb5
Author: Subru Krishnan 
Authored: Fri Aug 5 11:52:44 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 ...ederationApplicationHomeSubClusterStore.java | 126 
 .../AddApplicationHomeSubClusterRequest.java|  72 +++
 .../AddApplicationHomeSubClusterResponse.java   |  44 +
 .../records/ApplicationHomeSubCluster.java  | 124 
 .../DeleteApplicationHomeSubClusterRequest.java |  65 +++
 ...DeleteApplicationHomeSubClusterResponse.java |  43 +
 .../GetApplicationHomeSubClusterRequest.java|  64 +++
 .../GetApplicationHomeSubClusterResponse.java   |  73 +++
 .../GetApplicationsHomeSubClusterRequest.java   |  40 
 .../GetApplicationsHomeSubClusterResponse.java  |  75 
 .../UpdateApplicationHomeSubClusterRequest.java |  74 
 ...UpdateApplicationHomeSubClusterResponse.java |  43 +
 ...dApplicationHomeSubClusterRequestPBImpl.java | 132 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 .../pb/ApplicationHomeSubClusterPBImpl.java | 167 
 ...eApplicationHomeSubClusterRequestPBImpl.java | 130 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 ...tApplicationHomeSubClusterRequestPBImpl.java | 135 +
 ...ApplicationHomeSubClusterResponsePBImpl.java | 132 +
 ...ApplicationsHomeSubClusterRequestPBImpl.java |  78 
 ...pplicationsHomeSubClusterResponsePBImpl.java | 190 +++
 .../pb/GetSubClustersInfoResponsePBImpl.java|   6 +-
 ...eApplicationHomeSubClusterRequestPBImpl.java | 132 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 .../proto/yarn_server_federation_protos.proto   |  45 -
 .../records/TestFederationProtocolRecords.java  |  81 
 26 files changed, 2301 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be8b7e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
new file mode 100644
index 000..217ee2e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+i

[29/50] [abbrv] hadoop git commit: YARN-3662. Federation Membership State Store internal APIs.

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a2dbfba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
new file mode 100644
index 000..d4c5451
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProtoOrBuilder;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterStateProto;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protocol buffer based implementation of {@link SubClusterDeregisterRequest}.
+ */
+@Private
+@Unstable
+public class SubClusterDeregisterRequestPBImpl
+extends SubClusterDeregisterRequest {
+
+  private SubClusterDeregisterRequestProto proto =
+  SubClusterDeregisterRequestProto.getDefaultInstance();
+  private SubClusterDeregisterRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  public SubClusterDeregisterRequestPBImpl() {
+builder = SubClusterDeregisterRequestProto.newBuilder();
+  }
+
+  public SubClusterDeregisterRequestPBImpl(
+  SubClusterDeregisterRequestProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public SubClusterDeregisterRequestProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void mergeLocalToProto() {
+if (viaProto) {
+  maybeInitBuilder();
+}
+mergeLocalToBuilder();
+proto = builder.build();
+viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = SubClusterDeregisterRequestProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+  }
+
+  @Override
+  public int hashCode() {
+return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+if (other == null) {
+  return false;
+}
+if (other.getClass().isAssignableFrom(this.getClass())) {
+  return this.getProto().equals(this.getClass().cast(other).getProto());
+}
+return false;
+  }
+
+  @Override
+  public String toString() {
+return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public SubClusterId getSubClusterId() {
+SubClusterDeregisterRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasSubClusterId()) {
+  return null;
+}
+return convertFromProtoFormat(p.getSubClusterId());
+  }
+
+  @Override
+  public void setSubClusterId(SubClusterId subClusterId) {
+maybeInitBuilder();
+if (subClusterId == null) {
+  builder.clearSubClusterId();
+  return;
+}
+builder.setSubClusterId(convertToProtoFormat(s

[10/50] [abbrv] hadoop git commit: YARN-3637. Handle localization sym-linking correctly at the YARN level. Contributed by Chris Trezzo.

2017-01-27 Thread subru
YARN-3637. Handle localization sym-linking correctly at the YARN level. 
Contributed by Chris Trezzo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/425a7e50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/425a7e50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/425a7e50

Branch: refs/heads/YARN-2915
Commit: 425a7e502869c4250aba927ecc3c6f3c561c6ff2
Parents: cd59b9c
Author: Sangjin Lee 
Authored: Wed Jan 25 15:51:36 2017 -0800
Committer: Sangjin Lee 
Committed: Wed Jan 25 15:51:36 2017 -0800

--
 .../yarn/client/api/SharedCacheClient.java  | 23 +---
 .../client/api/impl/SharedCacheClientImpl.java  | 31 ++--
 .../api/impl/TestSharedCacheClientImpl.java | 37 +---
 3 files changed, 81 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/425a7e50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
index 7cbe0e1..60c1bd98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
@@ -55,22 +55,37 @@ public abstract class SharedCacheClient extends 
AbstractService {
* {@link ApplicationId} to identify which application will be using the
* resource.
* 
-   * 
+   *
* 
* The SharedCacheManager responds with whether or not the
* resource exists in the cache. If the resource exists, a Path
* to the resource in the shared cache is returned. If the resource does not
* exist, null is returned instead.
* 
-   * 
+   *
+   * 
+   * Once a path has been returned for a resource, that path is safe to use for
+   * the lifetime of the application that corresponds to the provided
+   * ApplicationId.
+   * 
+   *
+   * 
+   * Additionally, a name for the resource should be specified. A fragment will
+   * be added to the path with the desired name if the desired name is 
different
+   * than the name of the provided path from the shared cache. This ensures 
that
+   * if the returned path is used to create a LocalResource, then the symlink
+   * created during YARN localization will match the name specified.
+   * 
+   *
* @param applicationId ApplicationId of the application using the resource
* @param resourceKey the key (i.e. checksum) that identifies the resource
+   * @param resourceName the desired name of the resource
* @return Path to the resource, or null if it does not exist
*/
   @Public
   @Unstable
-  public abstract Path use(ApplicationId applicationId, String resourceKey)
-  throws YarnException;
+  public abstract Path use(ApplicationId applicationId, String resourceKey,
+  String resourceName) throws YarnException;
 
   /**
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/425a7e50/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index 0a61ee0..b910c28 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -111,8 +113,8 @@ public class SharedCacheClientImpl extends 
SharedCacheClient {
   }
 
   @Override
-  public Path use(ApplicationId applicationId, String resourceKey)
-  throws YarnException {
+  public Path use(ApplicationId applicationId, String resourceKey,
+  String resourceName) throws YarnException {
 Path resourcePath = null;
 UseSharedCacheResourceRequest request = Records.newRecord(
 

[39/50] [abbrv] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2d5ff77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
index e57709f..5de749f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.yarn.server.federation.policies.router;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
@@ -30,34 +30,27 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 
-import java.util.Map;
-
 /**
  * This implements a simple load-balancing policy. The policy "weights" are
 * binary 0/1 values that enable/disable each sub-cluster, and the policy picks
  * the sub-cluster with the least load to forward this application.
  */
-public class LoadBasedRouterPolicy
-extends BaseWeightedRouterPolicy {
-
-  private static final Log LOG =
-  LogFactory.getLog(LoadBasedRouterPolicy.class);
+public class LoadBasedRouterPolicy extends AbstractRouterPolicy {
 
   @Override
-  public void reinitialize(FederationPolicyInitializationContext
-  federationPolicyContext)
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
   throws FederationPolicyInitializationException {
 
 // remember old policyInfo
 WeightedPolicyInfo tempPolicy = getPolicyInfo();
 
-//attempt new initialization
-super.reinitialize(federationPolicyContext);
+// attempt new initialization
+super.reinitialize(policyContext);
 
-//check extra constraints
+// check extra constraints
 for (Float weight : getPolicyInfo().getRouterPolicyWeights().values()) {
   if (weight != 0 && weight != 1) {
-//reset to old policyInfo if check fails
+// reset to old policyInfo if check fails
 setPolicyInfo(tempPolicy);
 throw new FederationPolicyInitializationException(
 this.getClass().getCanonicalName()
@@ -69,18 +62,16 @@ public class LoadBasedRouterPolicy
 
   @Override
   public SubClusterId getHomeSubcluster(
-  ApplicationSubmissionContext appSubmissionContext)
-  throws YarnException {
+  ApplicationSubmissionContext appSubmissionContext) throws YarnException {
 
 Map activeSubclusters =
 getActiveSubclusters();
 
-Map weights = getPolicyInfo()
-.getRouterPolicyWeights();
+Map weights =
+getPolicyInfo().getRouterPolicyWeights();
 SubClusterIdInfo chosen = null;
 long currBestMem = -1;
-for (Map.Entry entry :
-activeSubclusters
+for (Map.Entry entry : activeSubclusters
 .entrySet()) {
   SubClusterIdInfo id = new SubClusterIdInfo(entry.getKey());
   if (weights.containsKey(id) && weights.get(id) > 0) {
@@ -95,8 +86,7 @@ public class LoadBasedRouterPolicy
 return chosen.toId();
   }
 
-  private long getAvailableMemory(SubClusterInfo value)
-  throws YarnException {
+  private long getAvailableMemory(SubClusterInfo value) throws YarnException {
 try {
   long mem = -1;
   JSONObject obj = new JSONObject(value.getCapability());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2d5ff77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
index a8ac5f7..bc3a1f7 100644
--- 
a/hadoop-yarn-pro

[43/50] [abbrv] hadoop git commit: YARN-5601. Make the RM epoch base value configurable. Contributed by Subru Krishnan

2017-01-27 Thread subru
YARN-5601. Make the RM epoch base value configurable. Contributed by Subru 
Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05db3df7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05db3df7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05db3df7

Branch: refs/heads/YARN-2915
Commit: 05db3df7c6960210f70f97a38a57b40b5cd8e786
Parents: 5bd6b12
Author: Jian He 
Authored: Fri Sep 2 12:23:57 2016 +0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml | 5 -
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java  | 3 +++
 .../apache/hadoop/yarn/conf/TestYarnConfigurationFields.java | 2 ++
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java  | 7 +++
 .../resourcemanager/recovery/FileSystemRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/LeveldbRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/MemoryRMStateStore.java  | 1 +
 .../yarn/server/resourcemanager/recovery/RMStateStore.java   | 4 
 .../yarn/server/resourcemanager/recovery/ZKRMStateStore.java | 2 +-
 .../resourcemanager/recovery/RMStateStoreTestBase.java   | 8 +---
 .../server/resourcemanager/recovery/TestFSRMStateStore.java  | 1 +
 .../resourcemanager/recovery/TestLeveldbRMStateStore.java| 1 +
 .../server/resourcemanager/recovery/TestZKRMStateStore.java  | 1 +
 13 files changed, 32 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05db3df7/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2f5451d..bbd03a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -293,7 +293,10 @@
   
   
 
-
+
+  
+  
+
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05db3df7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 402b37a..b9aa73c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -142,6 +142,9 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_HOSTNAME = RM_PREFIX + "hostname";
 
+  public static final String RM_EPOCH = RM_PREFIX + "epoch";
+  public static final long DEFAULT_RM_EPOCH = 0L;
+
   /** The address of the applications manager interface in the RM.*/
   public static final String RM_ADDRESS = 
 RM_PREFIX + "address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05db3df7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 5e0876f..3f3a06c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -75,6 +75,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.RM_EPOCH);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05db3df7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java

[12/50] [abbrv] hadoop git commit: YARN-4975. Fair Scheduler: exception thrown when a parent queue marked 'parent' has configured child queues (Contributed by Yufei Gu via Daniel Templeton)

2017-01-27 Thread subru
YARN-4975. Fair Scheduler: exception thrown when a parent queue marked 'parent' 
has configured child queues
(Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f85b74cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f85b74cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f85b74cc

Branch: refs/heads/YARN-2915
Commit: f85b74ccf9f1c1c1444cc00750b03468cbf40fb9
Parents: 7c1cc30
Author: Daniel Templeton 
Authored: Thu Jan 26 10:31:09 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Jan 26 10:31:09 2017 -0800

--
 .../fair/AllocationFileLoaderService.java   | 26 +++---
 .../fair/TestAllocationFileLoaderService.java   | 88 
 2 files changed, 101 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85b74cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index cd4a19b..163a265 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -487,6 +487,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 Map racls = new HashMap<>();
 NodeList fields = element.getChildNodes();
 boolean isLeaf = true;
+boolean isReservable = false;
 
 for (int j = 0; j < fields.getLength(); j++) {
   Node fieldNode = fields.item(j);
@@ -558,7 +559,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 racls.put(ReservationACL.SUBMIT_RESERVATIONS,
 new AccessControlList(text));
   } else if ("reservation".equals(field.getTagName())) {
-isLeaf = false;
+isReservable = true;
 reservableQueues.add(queueName);
 configuredQueues.get(FSQueueType.PARENT).add(queueName);
   } else if ("allowPreemptionFrom".equals(field.getTagName())) {
@@ -577,22 +578,21 @@ public class AllocationFileLoaderService extends 
AbstractService {
 isLeaf = false;
   }
 }
-if (isLeaf) {
-  // if a leaf in the alloc file is marked as type='parent'
-  // then store it under 'parent'
-  if ("parent".equals(element.getAttribute("type"))) {
-configuredQueues.get(FSQueueType.PARENT).add(queueName);
-  } else {
-configuredQueues.get(FSQueueType.LEAF).add(queueName);
-  }
+
+// if a leaf in the alloc file is marked as type='parent'
+// then store it as a parent queue
+if (isLeaf && !"parent".equals(element.getAttribute("type"))) {
+  configuredQueues.get(FSQueueType.LEAF).add(queueName);
 } else {
-  if ("parent".equals(element.getAttribute("type"))) {
-throw new AllocationConfigurationException("Both  and " +
-"type=\"parent\" found for queue " + queueName + " which is " +
-"unsupported");
+  if (isReservable) {
+throw new AllocationConfigurationException("The configuration settings"
++ " for " + queueName + " are invalid. A queue element that "
++ "contains child queue elements or that has the type='parent' "
++ "attribute cannot also include a reservation element.");
   }
   configuredQueues.get(FSQueueType.PARENT).add(queueName);
 }
+
 // Set default acls if not defined
 // The root queue defaults to all access
 for (QueueACL acl : QueueACL.values()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85b74cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.

[16/50] [abbrv] hadoop git commit: HDFS-9884. Use doxia macro to generate in-page TOC of HDFS site documentation. (iwasakims)

2017-01-27 Thread subru
HDFS-9884. Use doxia macro to generate in-page TOC of HDFS site documentation. 
(iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55c9f6d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55c9f6d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55c9f6d7

Branch: refs/heads/YARN-2915
Commit: 55c9f6d747f448ee56f2ebf2292e6a70105de2c0
Parents: 88da9f6
Author: Masatake Iwasaki 
Authored: Fri Jan 27 08:06:39 2017 +0900
Committer: Masatake Iwasaki 
Committed: Fri Jan 27 08:06:39 2017 +0900

--
 .../src/site/markdown/ArchivalStorage.md|  14 +--
 .../site/markdown/CentralizedCacheManagement.md |  25 +---
 .../src/site/markdown/ExtendedAttributes.md |   8 +-
 .../hadoop-hdfs/src/site/markdown/Federation.md |  15 +--
 .../src/site/markdown/HDFSCommands.md   |  38 +-
 .../src/site/markdown/HDFSDiskbalancer.md   |   6 +-
 .../src/site/markdown/HDFSErasureCoding.md  |  10 +-
 .../markdown/HDFSHighAvailabilityWithNFS.md |  24 +---
 .../markdown/HDFSHighAvailabilityWithQJM.md |  25 +---
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md |  36 +-
 .../src/site/markdown/HdfsEditsViewer.md|   5 +-
 .../src/site/markdown/HdfsImageViewer.md|  11 +-
 .../src/site/markdown/HdfsMultihoming.md|   9 +-
 .../src/site/markdown/HdfsNfsGateway.md |   9 +-
 .../src/site/markdown/HdfsPermissionsGuide.md   |  15 +--
 .../src/site/markdown/HdfsQuotaAdminGuide.md|  10 +-
 .../src/site/markdown/HdfsRollingUpgrade.md |  18 +--
 .../src/site/markdown/HdfsSnapshots.md  |  16 +--
 .../src/site/markdown/HdfsUserGuide.md  |  23 +---
 .../hadoop-hdfs/src/site/markdown/LibHdfs.md|   8 +-
 .../src/site/markdown/MemoryStorage.md  |  14 +--
 .../src/site/markdown/SLGUserGuide.md   |   7 +-
 .../src/site/markdown/ShortCircuitLocalReads.md |   7 +-
 .../src/site/markdown/TransparentEncryption.md  |  27 +---
 .../hadoop-hdfs/src/site/markdown/ViewFs.md |  15 +--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 123 +--
 26 files changed, 27 insertions(+), 491 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55c9f6d7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 06b7390..56a2ab8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -15,19 +15,7 @@
 Archival Storage, SSD & Memory
 ==
 
-* [Archival Storage, SSD & Memory](#Archival_Storage_SSD__Memory)
-* [Introduction](#Introduction)
-* [Storage Types and Storage Policies](#Storage_Types_and_Storage_Policies)
-* [Storage Types: ARCHIVE, DISK, SSD and 
RAM\_DISK](#Storage_Types:_ARCHIVE_DISK_SSD_and_RAM_DISK)
-* [Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD and 
Lazy\_Persist](#Storage_Policies:_Hot_Warm_Cold_All_SSD_One_SSD_and_Lazy_Persist)
-* [Storage Policy Resolution](#Storage_Policy_Resolution)
-* [Configuration](#Configuration)
-* [Mover - A New Data Migration Tool](#Mover_-_A_New_Data_Migration_Tool)
-* [Storage Policy Commands](#Storage_Policy_Commands)
-* [List Storage Policies](#List_Storage_Policies)
-* [Set Storage Policy](#Set_Storage_Policy)
-* [Unset Storage Policy](#Unset_Storage_Policy)
-* [Get Storage Policy](#Get_Storage_Policy)
+
 
 Introduction
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55c9f6d7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 210d25c..89ad670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -15,30 +15,7 @@
 Centralized Cache Management in HDFS
 
 
-* [Overview](#Overview)
-* [Use Cases](#Use_Cases)
-* [Architecture](#Architecture)
-* [Concepts](#Concepts)
-* [Cache directive](#Cache_directive)
-* [Cache pool](#Cache_pool)
-* [cacheadmin command-line interface](#cacheadmin_command-line_interface)
-* [Cache directive commands](#Cache_directive_commands)
-* [addDirective](#addDirective)
-* [removeDirective](#removeD

[40/50] [abbrv] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2017-01-27 Thread subru
YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2d5ff77
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2d5ff77
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2d5ff77

Branch: refs/heads/YARN-2915
Commit: e2d5ff7768d83948e45e0dd03558600cc6b79f91
Parents: 15ecaa4
Author: Subru Krishnan 
Authored: Thu Oct 13 17:59:13 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../AbstractConfigurableFederationPolicy.java   | 155 +
 .../policies/ConfigurableFederationPolicy.java  |   9 +-
 .../FederationPolicyInitializationContext.java  |  37 +-
 ...ionPolicyInitializationContextValidator.java |  28 +-
 .../policies/FederationPolicyManager.java   |  59 +-
 .../amrmproxy/AbstractAMRMProxyPolicy.java  |  47 ++
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |  85 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  25 +-
 .../LocalityMulticastAMRMProxyPolicy.java   | 583 +++
 .../policies/amrmproxy/package-info.java|   1 -
 .../policies/dao/WeightedPolicyInfo.java| 180 +++---
 .../federation/policies/dao/package-info.java   |   1 -
 .../policies/exceptions/package-info.java   |   1 -
 .../federation/policies/package-info.java   |   1 -
 .../policies/router/AbstractRouterPolicy.java   |  47 ++
 .../router/BaseWeightedRouterPolicy.java| 150 -
 .../policies/router/FederationRouterPolicy.java |   5 +-
 .../policies/router/LoadBasedRouterPolicy.java  |  36 +-
 .../policies/router/PriorityRouterPolicy.java   |  19 +-
 .../router/UniformRandomRouterPolicy.java   |  28 +-
 .../router/WeightedRandomRouterPolicy.java  |  32 +-
 .../policies/router/package-info.java   |   1 -
 .../resolver/AbstractSubClusterResolver.java|   4 +-
 .../policies/BaseFederationPoliciesTest.java|  28 +-
 ...ionPolicyInitializationContextValidator.java |  25 +-
 .../TestBroadcastAMRMProxyFederationPolicy.java | 112 
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 566 ++
 .../router/TestLoadBasedRouterPolicy.java   |  18 +-
 .../router/TestPriorityRouterPolicy.java|  15 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  35 +-
 .../utils/FederationPoliciesTestUtil.java   |  64 ++
 .../src/test/resources/nodes|   6 +-
 32 files changed, 1950 insertions(+), 453 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2d5ff77/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
new file mode 100644
index 000..4cb9bbe
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import java.util.Map;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.NoActiveSubclustersException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.

[13/50] [abbrv] hadoop git commit: HDFS-11364. Add a test to verify Audit log entries for setfacl/getfacl commands over FS shell. Contributed by Manoj Govindassamy.

2017-01-27 Thread subru
HDFS-11364. Add a test to verify Audit log entries for setfacl/getfacl commands 
over FS shell. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44606aa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44606aa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44606aa8

Branch: refs/heads/YARN-2915
Commit: 44606aa8508a6e98219b8330e625c8d397bfb067
Parents: f85b74c
Author: Xiao Chen 
Authored: Thu Jan 26 10:44:29 2017 -0800
Committer: Xiao Chen 
Committed: Thu Jan 26 10:48:26 2017 -0800

--
 .../hdfs/server/namenode/TestAuditLogger.java   | 73 
 1 file changed, 73 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44606aa8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index d637abc..0e3cc8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
@@ -58,6 +59,14 @@ import java.util.List;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_ENABLED_KEY;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_MAX_SIZE_KEY;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.NNTOP_ENABLED_KEY;
@@ -444,6 +453,70 @@ public class TestAuditLogger {
   }
 
   /**
+   * Verify Audit log entries for the successful ACL API calls and ACL commands
+   * over FS Shell.
+   */
+  @Test (timeout = 6)
+  public void testAuditLogForAcls() throws Exception {
+final Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
+DummyAuditLogger.class.getName());
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+try {
+  cluster.waitClusterUp();
+  assertTrue(DummyAuditLogger.initialized);
+
+  final FileSystem fs = cluster.getFileSystem();
+  final Path p = new Path("/debug.log");
+  DFSTestUtil.createFile(fs, p, 1024, (short)1, 0L);
+
+  DummyAuditLogger.resetLogCount();
+  fs.getAclStatus(p);
+  assertEquals(1, DummyAuditLogger.logCount);
+
+  // FS shell command '-getfacl' additionally calls getFileInfo() and then
+  // followed by getAclStatus() only if the ACL bit is set. Since the
+  // initial permission didn't have the ACL bit set, getAclStatus() is
+  // skipped.
+  DFSTestUtil.FsShellRun("-getfacl " + p.toUri().getPath(), 0, null, conf);
+  assertEquals(2, DummyAuditLogger.logCount);
+
+  final List acls = Lists.newArrayList();
+  acls.add(AclTestHelpers.aclEntry(ACCESS, USER, ALL));
+  acls.add(AclTestHelpers.aclEntry(ACCESS, USER, "user1", ALL));
+  acls.add(AclTestHelpers.aclEntry(ACCESS, GROUP, READ_EXECUTE));
+  acls.add(AclTestHelpers.aclEntry(ACCESS, OTHER, EXECUTE));
+
+  fs.setAcl(p, acls);
+  assertEquals(3, DummyAuditLogger.logCount);
+
+  // Since the file has ACL bit set, FS shell command '-getfacl' should now
+  // call getAclStatus() additionally after getFileInfo().
+  DFSTestUtil.FsShe

[06/50] [abbrv] hadoop git commit: Add CHANGES, RELEASENOTES, and jdiff for 3.0.0-alpha2 release.

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff02bdfe/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/RELEASENOTES.3.0.0-alpha2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/RELEASENOTES.3.0.0-alpha2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/RELEASENOTES.3.0.0-alpha2.md
new file mode 100644
index 000..843ce07
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha2/RELEASENOTES.3.0.0-alpha2.md
@@ -0,0 +1,618 @@
+
+
+# "Apache Hadoop"  3.0.0-alpha2 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | *Major* 
| **Incorporate Aliyun OSS file system implementation**
+
+Aliyun OSS is widely used among China’s cloud users and this work 
implemented a new Hadoop compatible filesystem AliyunOSSFileSystem with oss 
scheme, similar to the s3a and azure support.
+
+
+---
+
+* [HDFS-10760](https://issues.apache.org/jira/browse/HDFS-10760) | *Major* | 
**DataXceiver#run() should not log InvalidToken exception as an error**
+
+Log InvalidTokenException at trace level in DataXceiver#run().
+
+
+---
+
+* [HADOOP-13361](https://issues.apache.org/jira/browse/HADOOP-13361) | *Major* 
| **Modify hadoop\_verify\_user to be consistent with hadoop\_subcommand\_opts 
(ie more granularity)**
+
+Users:
+
+In Apache Hadoop 3.0.0-alpha1, verification required environment variables 
with the format of HADOOP\_(subcommand)\_USER where subcommand was lowercase 
applied globally.  This changes the format to be (command)\_(subcommand)\_USER 
where all are uppercase to be consistent with the \_OPTS functionality as well 
as being able to set per-command options.  Additionally, the check is now 
happening sooner, which should make it faster to fail.
+
+Developers:
+
+This changes hadoop\_verify\_user to require the program's name as part of the 
function call.  This is incompatible with Apache Hadoop 3.0.0-alpha1.
+
+
+---
+
+* [YARN-5549](https://issues.apache.org/jira/browse/YARN-5549) | *Critical* | 
**AMLauncher#createAMContainerLaunchContext() should not log the command to be 
launched indiscriminately**
+
+Introduces a new configuration property, 
yarn.resourcemanager.amlauncher.log.command.  If this property is set to true, 
then the AM command being launched will be masked in the RM log.
+
+
+---
+
+* [HDFS-6962](https://issues.apache.org/jira/browse/HDFS-6962) | *Critical* | 
**ACL inheritance conflicts with umaskmode**
+
+The original implementation of HDFS ACLs applied the client's umask to the 
permissions when inheriting a default ACL defined on a parent directory.  This 
behavior is a deviation from the POSIX ACL specification, which states that the 
umask has no influence when a default ACL propagates from parent to child.  
HDFS now offers the capability to ignore the umask in this case for improved 
compliance with POSIX.  This change is considered backward-incompatible, so the 
new behavior is off by default and must be explicitly configured by setting 
dfs.namenode.posix.acl.inheritance.enabled to true in hdfs-site.xml.  Please 
see the HDFS Permissions Guide for further details.
+
+
+---
+
+* [HADOOP-13341](https://issues.apache.org/jira/browse/HADOOP-13341) | *Major* 
| **Deprecate HADOOP\_SERVERNAME\_OPTS; replace with 
(command)\_(subcommand)\_OPTS**
+
+
+Users:
+* Ability to set per-command+sub-command options from the command line.
+* Makes daemon environment variable options consistent across the project. 
(See deprecation list below)
+* HADOOP\_CLIENT\_OPTS is now honored for every non-daemon sub-command. Prior 
to this change, many sub-commands did not use it.
+
+Developers:
+* No longer need to do custom handling for options in the case section of the 
shell scripts.
+* Consolidates all \_OPTS handling into hadoop-functions.sh to enable future 
projects.
+* All daemons running with secure mode features now get \_SECURE\_EXTRA\_OPTS 
support.
+
+\_OPTS Changes:
+
+| Old | New |
+|: |: |
+| HADOOP\_BALANCER\_OPTS | HDFS\_BALANCER\_OPTS | 
+| HADOOP\_DATANODE\_OPTS | HDFS\_DATANODE\_OPTS | 
+| HADOOP\_DN\_SECURE_EXTRA_OPTS | HDFS\_DATANODE\_SECURE\_EXTRA\_OPTS | 
+| HADOOP\_JOB\_HISTORYSERVER\_OPTS | MAPRED\_HISTORYSERVER\_OPTS | 
+| HADOOP\_JOURNALNODE\_OPTS | HDFS\_JOURNALNODE\_OPTS | 
+| HADOOP\_MOVER\_OPTS | HDFS\_MOVER\_OPTS | 
+| HADOOP\_NAMENODE\_OPTS | HDFS\_NAMENODE\_OPTS | 
+| HADOOP\_NFS3\_OPTS | HDFS\_NFS3\_OPTS | 
+| HADOOP\_NFS3\_SECURE\_EXTRA\_OPTS | HDFS\_NFS3\_SECURE\_EXTRA\_OPTS | | 
HADOOP\_PORTMAP\_OPTS | HDFS\_PORTMAP\_OPTS | 
+| HADOOP\_SECONDARYNAMENODE\_OPTS | 
+HDFS\_SECONDARYNAMENODE\_OPTS | 
+| HADOOP\_ZKFC\_OPTS | HDFS\_ZKFC\_OPTS |
+
+
+---
+
+* [HADOOP-13588](https://issues.

[15/50] [abbrv] hadoop git commit: Revert "HDFS-8377. Support HTTP/2 in datanode. Contributed by Duo Zhang."

2017-01-27 Thread subru
Revert "HDFS-8377. Support HTTP/2 in datanode. Contributed by Duo Zhang."

This reverts commit ada233b7cd7db39e609bb57e487fee8cec59cd48.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88da9f6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88da9f6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88da9f6b

Branch: refs/heads/YARN-2915
Commit: 88da9f6b6782423acd8ab7eb7d938720de7f3c0f
Parents: c65f884
Author: Xiao Chen 
Authored: Thu Jan 26 13:42:50 2017 -0800
Committer: Xiao Chen 
Committed: Thu Jan 26 13:42:50 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   5 -
 .../server/datanode/web/DatanodeHttpServer.java |  14 +-
 .../web/PortUnificationServerHandler.java   |  99 -
 .../datanode/web/SimpleHttpProxyHandler.java|  18 +--
 .../hdfs/server/datanode/web/URLDispatcher.java |  10 +-
 .../datanode/web/dtp/DtpHttp2FrameListener.java |  52 ---
 .../datanode/web/dtp/DtpHttp2Handler.java   |  34 -
 .../datanode/web/webhdfs/ExceptionHandler.java  |  29 ++--
 .../server/datanode/web/webhdfs/HdfsWriter.java |  10 +-
 .../datanode/web/webhdfs/WebHdfsHandler.java|  52 +++
 .../offlineImageViewer/FSImageHandler.java  |  41 +++---
 .../datanode/web/dtp/Http2ResponseHandler.java  |  65 
 .../server/datanode/web/dtp/TestDtpHttp2.java   | 147 ---
 hadoop-project/pom.xml  |   6 -
 14 files changed, 90 insertions(+), 492 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88da9f6b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index e39bf71..9e59a31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -179,11 +179,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   compile
 
 
-  com.twitter
-  hpack
-  compile
-
-
   xerces
   xercesImpl
   compile

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88da9f6b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 74e0916..b51b1fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -26,8 +26,8 @@ import javax.servlet.FilterConfig;
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 
+import io.netty.bootstrap.ChannelFactory;
 import io.netty.bootstrap.ServerBootstrap;
-import io.netty.channel.ChannelFactory;
 import io.netty.channel.ChannelFuture;
 import io.netty.channel.ChannelInitializer;
 import io.netty.channel.ChannelOption;
@@ -138,8 +138,16 @@ public class DatanodeHttpServer implements Closeable {
 .childHandler(new ChannelInitializer() {
 @Override
 protected void initChannel(SocketChannel ch) throws Exception {
-  ch.pipeline().addLast(new PortUnificationServerHandler(jettyAddr,
-  conf, confForCreate, restCsrfPreventionFilter));
+  ChannelPipeline p = ch.pipeline();
+  p.addLast(new HttpRequestDecoder(),
+new HttpResponseEncoder());
+  if (restCsrfPreventionFilter != null) {
+p.addLast(new RestCsrfPreventionFilterHandler(
+restCsrfPreventionFilter));
+  }
+  p.addLast(
+  new ChunkedWriteHandler(),
+  new URLDispatcher(jettyAddr, conf, confForCreate));
 }
   });
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88da9f6b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
deleted file mode 100644
index ff10c6d..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Sof

[37/50] [abbrv] hadoop git commit: YARN-5634. Simplify initialization/use of RouterPolicy via a RouterPolicyFacade. (Carlo Curino via Subru).

2017-01-27 Thread subru
YARN-5634. Simplify initialization/use of RouterPolicy via a 
RouterPolicyFacade. (Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ace063a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ace063a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ace063a7

Branch: refs/heads/YARN-2915
Commit: ace063a7047bea43dd7225049683b14bbeaf6682
Parents: 5f3c2b8
Author: Subru Krishnan 
Authored: Wed Nov 16 19:39:25 2016 -0800
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../dev-support/findbugs-exclude.xml|   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../yarn/conf/TestYarnConfigurationFields.java  |  12 +
 ...ionPolicyInitializationContextValidator.java |   2 +-
 .../PriorityBroadcastPolicyManager.java |  66 +
 .../federation/policies/RouterPolicyFacade.java | 266 +++
 .../policies/dao/WeightedPolicyInfo.java|   6 +-
 .../utils/FederationStateStoreFacade.java   |  16 +-
 .../TestPriorityBroadcastPolicyManager.java |  72 +
 .../policies/TestRouterPolicyFacade.java| 220 +++
 .../utils/FederationStateStoreTestUtil.java |  22 +-
 11 files changed, 693 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ace063a7/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index bbd03a9..ee51094 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -310,6 +310,15 @@
 
   
 
+  
+
+
+  
+  
+
+
+  
+
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ace063a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b9aa73c..e8e76d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2517,6 +2517,19 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 
+  public static final String DEFAULT_FEDERATION_POLICY_KEY = "*";
+
+  public static final String FEDERATION_POLICY_MANAGER = FEDERATION_PREFIX
+  + "policy-manager";
+
+  public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache"
+  + 
".hadoop.yarn.server.federation.policies.UniformBroadcastPolicyManager";
+
+  public static final String FEDERATION_POLICY_MANAGER_PARAMS =
+  FEDERATION_PREFIX + "policy-manager-params";
+
+  public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ace063a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 3f3a06c..6e33c0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -78,6 +78,18 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
 
+// Federation policies configs to be ignored
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_POLICY_MANAGER);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY);
+con

[01/50] [abbrv] hadoop git commit: YARN-5830. FairScheduler: Avoid preempting AM containers. (Yufei Gu via kasha) [Forced Update!]

2017-01-27 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 c58725b8d -> 08dc09581 (forced update)


YARN-5830. FairScheduler: Avoid preempting AM containers. (Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abedb8a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abedb8a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abedb8a9

Branch: refs/heads/YARN-2915
Commit: abedb8a9d86b4593a37fd3d2313fbcb057c7846a
Parents: b782bf2
Author: Karthik Kambatla 
Authored: Wed Jan 25 12:17:28 2017 -0800
Committer: Karthik Kambatla 
Committed: Wed Jan 25 12:17:28 2017 -0800

--
 .../scheduler/SchedulerNode.java|  21 ++-
 .../scheduler/fair/FSPreemptionThread.java  | 135 ++-
 .../fair/TestFairSchedulerPreemption.java   | 103 +++---
 3 files changed, 206 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abedb8a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 59ca81b..9c2dff3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -370,8 +371,8 @@ public abstract class SchedulerNode {
   }
 
   /**
-   * Get the running containers in the node.
-   * @return List of running containers in the node.
+   * Get the containers running on the node.
+   * @return A copy of containers running on the node.
*/
   public synchronized List getCopiedListOfRunningContainers() {
 List result = new ArrayList<>(launchedContainers.size());
@@ -382,6 +383,22 @@ public abstract class SchedulerNode {
   }
 
   /**
+   * Get the containers running on the node with AM containers at the end.
+   * @return A copy of running containers with AM containers at the end.
+   */
+  public synchronized List getRunningContainersWithAMsAtTheEnd() {
+LinkedList result = new LinkedList<>();
+for (ContainerInfo info : launchedContainers.values()) {
+  if(info.container.isAMContainer()) {
+result.addLast(info.container);
+  } else {
+result.addFirst(info.container);
+  }
+}
+return result;
+  }
+
+  /**
* Get the container for the specified container ID.
* @param containerId The container ID
* @return The container for the specified container ID

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abedb8a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index f432484..f166878 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
@@ -65,10 +65,10 @@ class FSPreemptionThread extends Thread {
   try{
 starvedApp = context.getStarvedApps().take();
 if (!Resources.isNone(starvedApp.getStarvation())) {
-  List containers =
+  PreemptableContainers containers =
   identifyContainersToPreempt(starvedApp);
   if (containers !

[20/50] [abbrv] hadoop git commit: YARN-6126. Obtaining app logs for Running application fails with json parse error. Contributed by Xuan Gong.

2017-01-27 Thread subru
YARN-6126. Obtaining app logs for Running application fails with json parse 
error. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a16431b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a16431b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a16431b

Branch: refs/heads/YARN-2915
Commit: 1a16431bd0df52d2df32c594f8b9b506c2101ad8
Parents: 165f07f
Author: Junping Du 
Authored: Fri Jan 27 06:31:10 2017 -0800
Committer: Junping Du 
Committed: Fri Jan 27 06:31:10 2017 -0800

--
 .../java/org/apache/hadoop/yarn/client/cli/LogsCLI.java  | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a16431b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index b8119e5..1de4cd1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -426,8 +426,17 @@ public class LogsCLI extends Configured implements Tool {
   if (response.getStatusInfo().getStatusCode() ==
   ClientResponse.Status.OK.getStatusCode()) {
 try {
+  JSONArray array = new JSONArray();
   JSONObject json = response.getEntity(JSONObject.class);
-  JSONArray array = json.getJSONArray("containerLogsInfo");
+  Object logsInfoObj = json.get("containerLogsInfo");
+  if (logsInfoObj instanceof JSONObject) {
+array.put((JSONObject)logsInfoObj);
+  } else if (logsInfoObj instanceof JSONArray) {
+JSONArray logsArray = (JSONArray)logsInfoObj;
+for (int i=0; i < logsArray.length(); i++) {
+  array.put(logsArray.getJSONObject(i));
+}
+  }
   for (int i = 0; i < array.length(); i++) {
 JSONObject log = array.getJSONObject(i);
 Object ob = log.get("containerLogInfo");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-01-27 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd588f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
index 4975a9f..5fa02d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.PriorityBroadcastPolicyManager;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
 import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd588f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
deleted file mode 100644
index 542a5ae..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.federation.policies;
-
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy;
-import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
-import org.junit.Before;
-
-/**
- * Simple test of {@link UniformBroadcastPolicyManager}.
- */
-public class TestUniformBroadcastPolicyManager extends BasePolicyManagerTest {
-
-  @Before
-  public void setup() {
-//config policy
-wfp = new UniformBroadcastPolicyManager();
-wfp.setQueue("queue1");
-
-//set expected params that the base test class will use for tests
-expectedPolicyManager = UniformBroadcastPolicyManager.class;
-expectedAMRMProxyPolicy = BroadcastAMRMProxyPolicy.class;
-expectedRouterPolicy = UniformRandomRouterPolicy.class;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd588f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-se

[05/50] [abbrv] hadoop git commit: MAPREDUCE-6808. Log map attempts as part of shuffle handler audit log (Contributed by Gergő Pásztor via Daniel Templeton)

2017-01-27 Thread subru
MAPREDUCE-6808. Log map attempts as part of shuffle handler audit log 
(Contributed by Gergő Pásztor via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7463b6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7463b6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7463b6c

Branch: refs/heads/YARN-2915
Commit: a7463b6c88f698950a2f326030261001aa51b35e
Parents: 9e19f75
Author: Daniel Templeton 
Authored: Wed Jan 25 14:30:50 2017 -0800
Committer: Daniel Templeton 
Committed: Wed Jan 25 14:32:40 2017 -0800

--
 .../src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7463b6c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 9547062..15a1b89 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -947,7 +947,7 @@ public class ShuffleHandler extends AuxiliaryService {
   // to turn it on please enable this audit log
   // on log4j.properties by uncommenting the setting
   if (AUDITLOG.isDebugEnabled()) {
-AUDITLOG.debug("shuffle for " + jobQ.get(0) +
+AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
  " reducer " + reduceQ.get(0));
   }
   int reduceId;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: HADOOP-13992. KMS should load SSL configuration the same way as SSLFactory. Contributed by John Zhuge.

2017-01-27 Thread subru
HADOOP-13992. KMS should load SSL configuration the same way as SSLFactory. 
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebd40056
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebd40056
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebd40056

Branch: refs/heads/YARN-2915
Commit: ebd40056a07df5807baf0652a47ea97334038f4d
Parents: 1a16431
Author: Xiao Chen 
Authored: Fri Jan 27 10:49:26 2017 -0800
Committer: Xiao Chen 
Committed: Fri Jan 27 10:49:26 2017 -0800

--
 .../apache/hadoop/security/ssl/SSLFactory.java  | 11 ++
 .../crypto/key/kms/server/KMSWebServer.java | 21 +---
 .../hadoop/crypto/key/kms/server/MiniKMS.java   | 11 ++
 3 files changed, 23 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd40056/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
index d72f9be..07eafab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
@@ -128,9 +128,10 @@ public class SSLFactory implements ConnectionConfigurator {
   throw new IllegalArgumentException("mode cannot be NULL");
 }
 this.mode = mode;
-requireClientCert = conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
+Configuration sslConf = readSSLConfiguration(conf, mode);
+
+requireClientCert = sslConf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
 SSL_REQUIRE_CLIENT_CERT_DEFAULT);
-Configuration sslConf = readSSLConfiguration(mode);
 
 Class klass
   = conf.getClass(KEYSTORES_FACTORY_CLASS_KEY,
@@ -149,9 +150,11 @@ public class SSLFactory implements ConnectionConfigurator {
 }
   }
 
-  private Configuration readSSLConfiguration(Mode mode) {
+  public static Configuration readSSLConfiguration(Configuration conf,
+   Mode mode) {
 Configuration sslConf = new Configuration(false);
-sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, requireClientCert);
+sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, conf.getBoolean(
+SSL_REQUIRE_CLIENT_CERT_KEY, SSL_REQUIRE_CLIENT_CERT_DEFAULT));
 String sslConfResource;
 if (mode == Mode.CLIENT) {
   sslConfResource = conf.get(SSL_CLIENT_CONF_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd40056/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
index 70945cb..02c4a42 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java
@@ -46,13 +46,7 @@ public class KMSWebServer {
   private final HttpServer2 httpServer;
   private final String scheme;
 
-  KMSWebServer(Configuration cnf) throws Exception {
-ConfigurationWithLogging conf = new ConfigurationWithLogging(cnf);
-
-// Add SSL configuration file
-conf.addResource(conf.get(SSLFactory.SSL_SERVER_CONF_KEY,
-SSLFactory.SSL_SERVER_CONF_DEFAULT));
-
+  KMSWebServer(Configuration conf, Configuration sslConf) throws Exception {
 // Override configuration with deprecated environment variables.
 deprecateEnv("KMS_TEMP", conf, HttpServer2.HTTP_TEMP_DIR_KEY,
 KMSConfiguration.KMS_SITE_XML);
@@ -68,10 +62,10 @@ public class KMSWebServer {
 KMSConfiguration.KMS_SITE_XML);
 deprecateEnv("KMS_SSL_ENABLED", conf,
 KMSConfiguration.SSL_ENABLED_KEY, KMSConfiguration.KMS_SITE_XML);
-deprecateEnv("KMS_SSL_KEYSTORE_FILE", conf,
+deprecateEnv("KMS_SSL_KEYSTORE_FILE", sslConf,
 SSLFactory.SSL_SERVER_KEYSTORE_LOCATION,
 SSLFactory.SSL_SERVER_CONF_DEFAULT);
-deprecateEnv("KMS_SSL_KEYSTORE_PASS", conf,
+deprecateEnv("KMS_SSL_KEYSTORE_PASS", sslConf,
 SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD,
 SSLFactory.SSL_SERVER_CONF_DEFAULT);
 
@@ -88,7 +82,7 @@ public class KMSWebServer {
 httpServer = new HttpServer2.Builder()
 .setName(NAME)

[18/50] [abbrv] hadoop git commit: HADOOP-14029. Fix KMSClientProvider for non-secure proxyuser use case. Contributed by Xiaoyu Yao.

2017-01-27 Thread subru
HADOOP-14029. Fix KMSClientProvider for non-secure proxyuser use case. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20343157
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20343157
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20343157

Branch: refs/heads/YARN-2915
Commit: 2034315763cd7b1eb77e96c719918fc14e2dabf6
Parents: 7bc333a
Author: Xiaoyu Yao 
Authored: Thu Jan 26 20:34:32 2017 -0800
Committer: Xiaoyu Yao 
Committed: Thu Jan 26 20:34:32 2017 -0800

--
 .../apache/hadoop/crypto/key/kms/KMSClientProvider.java  | 11 ++-
 .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java |  6 +-
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20343157/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index ccc8968..4c6b625 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -1096,13 +1096,14 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   // Use real user for proxy user
   actualUgi = currentUgi.getRealUser();
 }
-
-if (!containsKmsDt(actualUgi) &&
+if (UserGroupInformation.isSecurityEnabled() &&
+!containsKmsDt(actualUgi) &&
 !actualUgi.hasKerberosCredentials()) {
-  // Use login user for user that does not have either
+  // Use login user is only necessary when Kerberos is enabled
+  // but the actual user does not have either
   // Kerberos credential or KMS delegation token for KMS operations
-  LOG.debug("using loginUser no KMS Delegation Token "
-  + "no Kerberos Credentials");
+  LOG.debug("Using loginUser when Kerberos is enabled but the actual user" 
+
+  " does not have either KMS Delegation Token or Kerberos 
Credentials");
   actualUgi = UserGroupInformation.getLoginUser();
 }
 return actualUgi;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20343157/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 3a2d53c..72301db 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -2419,7 +2419,11 @@ public class TestKMS {
 
   public void doWebHDFSProxyUserTest(final boolean kerberos) throws Exception {
 Configuration conf = new Configuration();
-conf.set("hadoop.security.authentication", "kerberos");
+if (kerberos) {
+  conf.set("hadoop.security.authentication", "kerberos");
+}
+UserGroupInformation.setConfiguration(conf);
+
 final File testDir = getTestDir();
 conf = createBaseKMSConf(testDir, conf);
 if (kerberos) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router Failover. (Ellen Hui via Subru)

2017-01-27 Thread subru
YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router 
Failover. (Ellen Hui via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a3ef845
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a3ef845
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a3ef845

Branch: refs/heads/YARN-2915
Commit: 4a3ef8451c7ecc4392e0bcc3f256cd8213a0190c
Parents: 82fca62
Author: Subru Krishnan 
Authored: Mon Aug 15 14:47:02 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 ...ederationApplicationHomeSubClusterStore.java | 21 +++---
 .../store/impl/MemoryFederationStateStore.java  | 22 +++---
 .../AddApplicationHomeSubClusterResponse.java   | 29 ++--
 ...ApplicationHomeSubClusterResponsePBImpl.java | 39 +++
 .../proto/yarn_server_federation_protos.proto   |  1 +
 .../impl/FederationStateStoreBaseTest.java  | 71 +---
 6 files changed, 120 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a3ef845/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
index 22bb88a..ace2457 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -51,15 +51,20 @@ public interface FederationApplicationHomeSubClusterStore {
   /**
* Register the home {@code SubClusterId} of the newly submitted
* {@code ApplicationId}. Currently response is empty if the operation was
-   * successful, if not an exception reporting reason for a failure.
+   * successful, if not an exception reporting reason for a failure. If a
+   * mapping for the application already existed, the {@code SubClusterId} in
+   * this response will return the existing mapping which might be different
+   * from that in the {@code AddApplicationHomeSubClusterRequest}.
*
* @param request the request to register a new application with its home
*  sub-cluster
-   * @return empty on successful registration of the application in the
-   * StateStore, if not an exception reporting reason for a failure
+   * @return upon successful registration of the application in the StateStore,
+   * {@code AddApplicationHomeSubClusterRequest} containing the home
+   * sub-cluster of the application. Otherwise, an exception reporting
+   * reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  AddApplicationHomeSubClusterResponse addApplicationHomeSubClusterMap(
+  AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster(
   AddApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -73,7 +78,7 @@ public interface FederationApplicationHomeSubClusterStore {
* not an exception reporting reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubClusterMap(
+  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster(
   UpdateApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -85,7 +90,7 @@ public interface FederationApplicationHomeSubClusterStore {
* subcluster
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationHomeSubClusterResponse getApplicationHomeSubClusterMap(
+  GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster(
   GetApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -96,7 +101,7 @@ public interface FederationApplicationHomeSubClusterStore {
* @return the mapping of all submitted application to it's home sub-cluster
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationsHomeSubClusterResponse getApplicationsHomeSubClusterMap(
+  GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster(
   GetApplicationsHomeSubClusterRequest request) 

[08/50] [abbrv] hadoop git commit: Add CHANGES, RELEASENOTES, and jdiff for 3.0.0-alpha2 release.

2017-01-27 Thread subru
Add CHANGES, RELEASENOTES, and jdiff for 3.0.0-alpha2 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff02bdfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff02bdfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff02bdfe

Branch: refs/heads/YARN-2915
Commit: ff02bdfe65b5ff894efd1a3aa0e35bac9f4b783c
Parents: a7463b6
Author: Andrew Wang 
Authored: Wed Jan 25 12:49:29 2017 -0800
Committer: Andrew Wang 
Committed: Wed Jan 25 15:39:40 2017 -0800

--
 .../3.0.0-alpha2/CHANGES.3.0.0-alpha2.md| 927 +++
 .../3.0.0-alpha2/RELEASENOTES.3.0.0-alpha2.md   | 618 +
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha2.xml   | 326 +++
 3 files changed, 1871 insertions(+)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: HDFS-11374. Skip FSync in CreateEditsLog to speed up edit log generation. Contributed by Hanisha Koneru.

2017-01-27 Thread subru
HDFS-11374. Skip FSync in CreateEditsLog to speed up edit log generation. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bc333ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bc333ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bc333ad

Branch: refs/heads/YARN-2915
Commit: 7bc333ad4156fd2a9f946103e4e1cf917fae4c3a
Parents: 55c9f6d
Author: Arpit Agarwal 
Authored: Thu Jan 26 16:21:25 2017 -0800
Committer: Arpit Agarwal 
Committed: Thu Jan 26 16:21:25 2017 -0800

--
 .../org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bc333ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index 733dd71..bdb2101 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -199,9 +199,9 @@ public class CreateEditsLog {
 System.exit(-1);
   }
 }
-
 
 FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
+EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
 FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
 editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 addFiles(editLog, numFiles, replication, numBlocksPerFile, startingBlockId,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: YARN-5391. PolicyManager to tie together Router/AMRM Federation policies. (Carlo Curino via Subru).

2017-01-27 Thread subru
YARN-5391. PolicyManager to tie together Router/AMRM Federation policies. 
(Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f3c2b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f3c2b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f3c2b8c

Branch: refs/heads/YARN-2915
Commit: 5f3c2b8c9ca2560f111c85042cd99d01c421a919
Parents: e2d5ff7
Author: Subru Krishnan 
Authored: Tue Nov 1 19:54:18 2016 -0700
Committer: Subru Krishnan 
Committed: Fri Jan 27 15:34:18 2017 -0800

--
 .../policies/AbstractPolicyManager.java | 175 +++
 .../FederationPolicyInitializationContext.java  |   3 +-
 .../policies/UniformBroadcastPolicyManager.java |  56 ++
 .../policies/WeightedLocalityPolicyManager.java |  67 +++
 .../records/SubClusterPolicyConfiguration.java  |  13 ++
 .../policies/BasePolicyManagerTest.java | 108 
 ...ionPolicyInitializationContextValidator.java |   5 +-
 .../TestUniformBroadcastPolicyManager.java  |  40 +
 .../TestWeightedLocalityPolicyManager.java  |  79 +
 .../utils/FederationPoliciesTestUtil.java   |   2 +-
 10 files changed, 545 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f3c2b8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
new file mode 100644
index 000..e77f2e3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class provides basic implementation for common methods that multiple
+ * policies will need to implement.
+ */
+public abstract class AbstractPolicyManager implements
+FederationPolicyManager {
+
+  private String queue;
+  @SuppressWarnings("checkstyle:visibilitymodifier")
+  protected Class routerFederationPolicy;
+  @SuppressWarnings("checkstyle:visibilitymodifier")
+  protected Class amrmProxyFederationPolicy;
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AbstractPolicyManager.class);
+  /**
+   * This default implementation validates the
+   * {@link FederationPolicyInitializationContext},
+   * then checks whether it needs to reinstantiate the class (null or
+   * mismatching type), and reinitialize the policy.
+   *
+   * @param federationPolicyContext the current context
+   * @param oldInstance the existing (possibly null) instance.
+   *
+   * @return a valid and fully reinitalized {@link FederationAMRMProxyPolicy}
+   * instance
+   *
+   * @throws FederationPolicyInitializationException if the reinitalization is
+   * not valid, and ensure
+   * previous state is 
preserved
+   */
+  public FederationAMRMProxyPolicy getAMRMPolicy(
+  FederationPolicyInitializat

[02/50] [abbrv] hadoop git commit: HADOOP-13433 Race in UGI.reloginFromKeytab. Contributed by Duo Zhang.

2017-01-27 Thread subru
HADOOP-13433 Race in UGI.reloginFromKeytab. Contributed by Duo Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fc3e68a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fc3e68a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fc3e68a

Branch: refs/heads/YARN-2915
Commit: 7fc3e68a876132563aa2321519fc6941e37b2cae
Parents: abedb8a
Author: Steve Loughran 
Authored: Wed Jan 25 21:29:27 2017 +
Committer: Steve Loughran 
Committed: Wed Jan 25 21:29:27 2017 +

--
 hadoop-common-project/hadoop-common/pom.xml |   5 +
 .../hadoop/security/UserGroupInformation.java   |  65 ++--
 .../security/TestFixKerberosTicketOrder.java| 158 ++
 .../hadoop/security/TestRaceWhenRelogin.java| 162 +++
 4 files changed, 375 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fc3e68a/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index b69de55..909cd78 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -46,6 +46,11 @@
   compile
 
 
+  org.apache.hadoop
+  hadoop-minikdc
+  test
+
+
   com.google.guava
   guava
   compile

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fc3e68a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index cf240ff..6574e55 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -24,6 +24,8 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FI
 import static org.apache.hadoop.security.UGIExceptionMessages.*;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -45,6 +47,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import javax.security.auth.DestroyFailedException;
 import javax.security.auth.Subject;
 import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.kerberos.KerberosPrincipal;
@@ -76,8 +79,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-
-import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -1165,10 +1166,41 @@ public class UserGroupInformation {
 reloginFromKeytab();
   }
 
+  // if the first kerberos ticket is not TGT, then remove and destroy it since
+  // the kerberos library of jdk always use the first kerberos ticket as TGT.
+  // See HADOOP-13433 for more details.
+  @VisibleForTesting
+  void fixKerberosTicketOrder() {
+Set creds = getSubject().getPrivateCredentials();
+synchronized (creds) {
+  for (Iterator iter = creds.iterator(); iter.hasNext();) {
+Object cred = iter.next();
+if (cred instanceof KerberosTicket) {
+  KerberosTicket ticket = (KerberosTicket) cred;
+  if (!ticket.getServer().getName().startsWith("krbtgt")) {
+LOG.warn(
+"The first kerberos ticket is not TGT"
++ "(the server principal is {}), remove and destroy it.",
+ticket.getServer());
+iter.remove();
+try {
+  ticket.destroy();
+} catch (DestroyFailedException e) {
+  LOG.warn("destroy ticket failed", e);
+}
+  } else {
+return;
+  }
+}
+  }
+}
+LOG.warn("Warning, no kerberos ticket found while attempting to renew 
ticket");
+  }
+
   /**
* Re-Login a user in from a keytab file. Loads a user identity from a keytab
* file and logs them in. They become the currently logged-in user. This
-   * method assumes that {@link #loginUserFromKeytab(String, String)} had 
+   * method assumes that {@link #loginUserFromKeytab(String, String)} had
* happened already.
* The Subject field of this UserGroupInformation object is updated

hadoop git commit: HDFS-9884. Use doxia macro to generate in-page TOC of HDFS site documentation. (iwasakims)

2017-01-27 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d20dc8691 -> 528bff9c4


HDFS-9884. Use doxia macro to generate in-page TOC of HDFS site documentation. 
(iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/528bff9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/528bff9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/528bff9c

Branch: refs/heads/branch-2
Commit: 528bff9c42ed3e9d8e989df8fda317fa079ef43c
Parents: d20dc86
Author: Masatake Iwasaki 
Authored: Sat Jan 28 08:11:40 2017 +0900
Committer: Masatake Iwasaki 
Committed: Sat Jan 28 08:11:40 2017 +0900

--
 .../src/site/markdown/ArchivalStorage.md|  14 +--
 .../site/markdown/CentralizedCacheManagement.md |  25 +---
 .../src/site/markdown/ExtendedAttributes.md |   8 +-
 .../hadoop-hdfs/src/site/markdown/Federation.md |  15 +--
 .../src/site/markdown/HDFSCommands.md   |  35 +-
 .../markdown/HDFSHighAvailabilityWithNFS.md |  25 +---
 .../markdown/HDFSHighAvailabilityWithQJM.md |  25 +---
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md |  36 +-
 .../src/site/markdown/HdfsEditsViewer.md|   5 +-
 .../src/site/markdown/HdfsImageViewer.md|  11 +-
 .../src/site/markdown/HdfsMultihoming.md|   9 +-
 .../src/site/markdown/HdfsNfsGateway.md |   9 +-
 .../src/site/markdown/HdfsPermissionsGuide.md   |  15 +--
 .../src/site/markdown/HdfsQuotaAdminGuide.md|  10 +-
 .../src/site/markdown/HdfsRollingUpgrade.md |  20 +--
 .../src/site/markdown/HdfsSnapshots.md  |  16 +--
 .../src/site/markdown/HdfsUserGuide.md  |  23 +---
 .../hadoop-hdfs/src/site/markdown/Hftp.md   |   6 +-
 .../hadoop-hdfs/src/site/markdown/LibHdfs.md|   8 +-
 .../src/site/markdown/MemoryStorage.md  |  14 +--
 .../src/site/markdown/SLGUserGuide.md   |   7 +-
 .../src/site/markdown/ShortCircuitLocalReads.md |   7 +-
 .../src/site/markdown/TransparentEncryption.md  |  27 +---
 .../hadoop-hdfs/src/site/markdown/ViewFs.md |  15 +--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 123 +--
 25 files changed, 26 insertions(+), 482 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/528bff9c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 803cc91..44cb9c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -15,19 +15,7 @@
 Archival Storage, SSD & Memory
 ==
 
-* [Archival Storage, SSD & Memory](#Archival_Storage_SSD__Memory)
-* [Introduction](#Introduction)
-* [Storage Types and Storage Policies](#Storage_Types_and_Storage_Policies)
-* [Storage Types: ARCHIVE, DISK, SSD and 
RAM\_DISK](#Storage_Types:_ARCHIVE_DISK_SSD_and_RAM_DISK)
-* [Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD and 
Lazy\_Persist](#Storage_Policies:_Hot_Warm_Cold_All_SSD_One_SSD_and_Lazy_Persist)
-* [Storage Policy Resolution](#Storage_Policy_Resolution)
-* [Configuration](#Configuration)
-* [Mover - A New Data Migration Tool](#Mover_-_A_New_Data_Migration_Tool)
-* [Storage Policy Commands](#Storage_Policy_Commands)
-* [List Storage Policies](#List_Storage_Policies)
-* [Set Storage Policy](#Set_Storage_Policy)
-* [Unset Storage Policy](#Unset_Storage_Policy)
-* [Get Storage Policy](#Get_Storage_Policy)
+
 
 Introduction
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/528bff9c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 210d25c..89ad670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -15,30 +15,7 @@
 Centralized Cache Management in HDFS
 
 
-* [Overview](#Overview)
-* [Use Cases](#Use_Cases)
-* [Architecture](#Architecture)
-* [Concepts](#Concepts)
-* [Cache directive](#Cache_directive)
-* [Cache pool](#Cache_pool)
-* [cacheadmin command-line interface](#cacheadmin_command-line_interface)
-* [Cache directive commands](#Cache_directive_commands)
-* [addDirective](#addDirective)
-* [

hadoop git commit: HDFS-9911. TestDataNodeLifeline Fails intermittently. Contributed by Yiqun Lin (cherry picked from commit a95639068c99ebcaefe8b6c4268449d12a6577d6)

2017-01-27 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1a6b6e249 -> 4f135647f


HDFS-9911. TestDataNodeLifeline Fails intermittently. Contributed by Yiqun Lin
(cherry picked from commit a95639068c99ebcaefe8b6c4268449d12a6577d6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f135647
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f135647
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f135647

Branch: refs/heads/branch-2.8
Commit: 4f135647fd4033d96e38f761a844cc243edfe1a8
Parents: 1a6b6e2
Author: Anu Engineer 
Authored: Fri Dec 16 09:46:21 2016 -0800
Committer: Eric Payne 
Committed: Fri Jan 27 21:09:03 2017 +

--
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f135647/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 2e430af..06c25d8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -1027,7 +1027,7 @@ class BPServiceActor implements Runnable {
 volatile long nextHeartbeatTime = monotonicNow();
 
 @VisibleForTesting
-volatile long nextLifelineTime = monotonicNow();
+volatile long nextLifelineTime;
 
 @VisibleForTesting
 volatile long lastBlockReportTime = monotonicNow();
@@ -1050,6 +1050,7 @@ class BPServiceActor implements Runnable {
   this.heartbeatIntervalMs = heartbeatIntervalMs;
   this.lifelineIntervalMs = lifelineIntervalMs;
   this.blockReportIntervalMs = blockReportIntervalMs;
+  scheduleNextLifeline(nextHeartbeatTime);
 }
 
 // This is useful to make sure NN gets Heartbeat before Blockreport


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-9911. TestDataNodeLifeline Fails intermittently. Contributed by Yiqun Lin

2017-01-27 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4703f5d20 -> d20dc8691


HDFS-9911. TestDataNodeLifeline Fails intermittently. Contributed by Yiqun Lin

(cherry picked from commit a95639068c99ebcaefe8b6c4268449d12a6577d6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d20dc869
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d20dc869
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d20dc869

Branch: refs/heads/branch-2
Commit: d20dc8691e984fc4b3966a478b64f027ea7f8a05
Parents: 4703f5d
Author: Anu Engineer 
Authored: Fri Dec 16 09:46:21 2016 -0800
Committer: Eric Payne 
Committed: Fri Jan 27 20:55:59 2017 +

--
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d20dc869/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 5bfdaec..c605588 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -1083,7 +1083,7 @@ class BPServiceActor implements Runnable {
 volatile long nextHeartbeatTime = monotonicNow();
 
 @VisibleForTesting
-volatile long nextLifelineTime = monotonicNow();
+volatile long nextLifelineTime;
 
 @VisibleForTesting
 volatile long lastBlockReportTime = monotonicNow();
@@ -1111,6 +1111,7 @@ class BPServiceActor implements Runnable {
   this.lifelineIntervalMs = lifelineIntervalMs;
   this.blockReportIntervalMs = blockReportIntervalMs;
   this.slowPeersReportIntervalMs = slowPeersReportIntervalMs;
+  scheduleNextLifeline(nextHeartbeatTime);
 }
 
 // This is useful to make sure NN gets Heartbeat before Blockreport


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: Revert "Fix synchronization issues of AbstractYarnScheduler#nodeUpdate and its implementations. (Naganarasimha G R via wangda)" to add correct JIRA number

2017-01-27 Thread vvasudev
Revert "Fix synchronization issues of AbstractYarnScheduler#nodeUpdate and its 
implementations. (Naganarasimha G R via wangda)" to add correct JIRA number

This reverts commit e0f2379312c48e26b0cb2c1e1e803ef71d1839cf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab1faa4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab1faa4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab1faa4b

Branch: refs/heads/YARN-3926
Commit: ab1faa4ba80702fb04e28ffb23a4b3bb6e64ee16
Parents: e0f2379
Author: Wangda Tan 
Authored: Tue Jan 3 14:53:13 2017 -0800
Committer: Wangda Tan 
Committed: Tue Jan 3 14:53:13 2017 -0800

--
 .../resourcemanager/scheduler/AbstractYarnScheduler.java | 11 +--
 .../scheduler/capacity/CapacityScheduler.java|  4 ++--
 .../resourcemanager/scheduler/fair/FairScheduler.java|  4 ++--
 .../resourcemanager/scheduler/fifo/FifoScheduler.java|  9 +
 4 files changed, 10 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab1faa4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index c1a985d..c0cc6b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -36,7 +36,6 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.records.AbstractResourceRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
@@ -52,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.records.AbstractResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -77,15 +77,14 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReco
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeFinishedContainersPulledByAMEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.SettableFuture;
 
@@ -396,8 +395,8 @@ public abstract class AbstractYarnScheduler
 }
   }
 
-  public void recoverContainersOnNode(List containerReports,
-  RMNode nm) {
+  public synchronized void recoverContainersOnNode(
+  List containerReports, RMNode nm) {
 try {
   writeLock.lock();
   if (!rmContext.isWorkPreservingRecoveryEnabled()
@@ -994,7 +993,7 @@ public abstract class AbstractYarnScheduler
* Process a heartbeat update from a node.
* @param nm The RMNode corresponding to the NodeManager
*/
-  protected void n

[45/50] [abbrv] hadoop git commit: YARN-5586. Update the Resources class to consider all resource types. Contributed by Varun Vasudev.

2017-01-27 Thread vvasudev
YARN-5586. Update the Resources class to consider all resource types. 
Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d23f94d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d23f94d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d23f94d

Branch: refs/heads/YARN-3926
Commit: 3d23f94d8e7a50abbab1a210e281a7295c75f70e
Parents: 39d2350
Author: Rohith Sharma K S 
Authored: Mon Sep 12 10:44:26 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:23:36 2017 +0530

--
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../resource/DominantResourceCalculator.java|  36 ++--
 .../yarn/util/resource/ResourceUtils.java   |   3 +-
 .../hadoop/yarn/util/resource/Resources.java| 138 ++--
 .../yarn/util/resource/TestResourceUtils.java   |  23 ++
 .../yarn/util/resource/TestResources.java   | 212 +--
 .../resourcemanager/resource/TestResources.java |  43 
 7 files changed, 369 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d23f94d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 36e5beb..236a763 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -156,7 +156,9 @@ public class ResourcePBImpl extends Resource {
   resourceInformation.setName(resource);
 }
 initResources();
-resources.put(resource, resourceInformation);
+if (resources.containsKey(resource)) {
+  resources.put(resource, resourceInformation);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d23f94d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 6c5406b..af1391d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -181,8 +181,10 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 Long requiredResourceValue = UnitsConversionUtil
 .convert(requiredResource.getUnits(), availableResource.getUnits(),
 requiredResource.getValue());
-Long tmp = availableResource.getValue() / requiredResourceValue;
-min = min < tmp ? min : tmp;
+if (requiredResourceValue != 0) {
+  Long tmp = availableResource.getValue() / requiredResourceValue;
+  min = min < tmp ? min : tmp;
+}
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
 "Error getting resource information for " + resource, ye);
@@ -291,10 +293,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 rResourceInformation.getUnits(),
 stepFactorResourceInformation.getValue());
-
-tmp.setValue(
-Math.min(roundUp(Math.max(rValue, minimumValue), stepFactorValue),
-maximumValue));
+Long value = Math.max(rValue, minimumValue);
+if (stepFactorValue != 0) {
+  value = roundUp(value, stepFactorValue);
+}
+tmp.setValue(Math.min(value, maximumValue));
 ret.setResourceInformation(resource, tmp);
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
@@ -330,9 +333,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 rResourceInformation.getUnits(),
 stepFactorResourceInformation.getValue());
-
-Long value = roundUp

[47/50] [abbrv] hadoop git commit: YARN-4172. Extend DominantResourceCalculator to account for all resources. (Varun Vasudev via wangda)

2017-01-27 Thread vvasudev
YARN-4172. Extend DominantResourceCalculator to account for all resources. 
(Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8bd30da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8bd30da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8bd30da

Branch: refs/heads/YARN-3926
Commit: a8bd30da2f011c7581fa1e6781ac556e3a2b3e1b
Parents: 09f27e5
Author: Wangda Tan 
Authored: Fri Jan 29 10:53:31 2016 +0800
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:23:36 2017 +0530

--
 .../resource/DominantResourceCalculator.java| 389 +--
 1 file changed, 273 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8bd30da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 9f1c8d7..58a9f15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -22,25 +22,31 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+
+import java.util.HashSet;
+import java.util.Set;
 
 /**
- * A {@link ResourceCalculator} which uses the concept of  
+ * A {@link ResourceCalculator} which uses the concept of
  * dominant resource to compare multi-dimensional resources.
  *
- * Essentially the idea is that the in a multi-resource environment, 
- * the resource allocation should be determined by the dominant share 
- * of an entity (user or queue), which is the maximum share that the 
- * entity has been allocated of any resource. 
- * 
- * In a nutshell, it seeks to maximize the minimum dominant share across 
- * all entities. 
- * 
+ * Essentially the idea is that the in a multi-resource environment,
+ * the resource allocation should be determined by the dominant share
+ * of an entity (user or queue), which is the maximum share that the
+ * entity has been allocated of any resource.
+ *
+ * In a nutshell, it seeks to maximize the minimum dominant share across
+ * all entities.
+ *
  * For example, if user A runs CPU-heavy tasks and user B runs
- * memory-heavy tasks, it attempts to equalize CPU share of user A 
- * with Memory-share of user B. 
- * 
+ * memory-heavy tasks, it attempts to equalize CPU share of user A
+ * with Memory-share of user B.
+ *
  * In the single resource case, it reduces to max-min fairness for that 
resource.
- * 
+ *
  * See the Dominant Resource Fairness paper for more details:
  * www.cs.berkeley.edu/~matei/papers/2011/nsdi_drf.pdf
  */
@@ -50,31 +56,69 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   private static final Log LOG =
   LogFactory.getLog(DominantResourceCalculator.class);
 
+
+  private Set resourceNames;
+
+  public DominantResourceCalculator() {
+resourceNames = new HashSet<>();
+resourceNames.add(ResourceInformation.MEMORY.getName());
+resourceNames.add(ResourceInformation.VCORES.getName());
+  }
+
+  /**
+   * Compare two resources - if the value for every resource type for the lhs
+   * is greater than that of the rhs, return 1. If the value for every resource
+   * type in the lhs is less than the rhs, return -1. Otherwise, return 0
+   *
+   * @param lhs resource to be compared
+   * @param rhs resource to be compared
+   * @return 0, 1, or -1
+   */
+  private int compare(Resource lhs, Resource rhs) {
+boolean lhsGreater = false;
+boolean rhsGreater = false;
+int ret = 0;
+
+for (String rName : resourceNames) {
+  try {
+ResourceInformation lhsResourceInformation =
+lhs.getResourceInformation(rName);
+ResourceInformation rhsResourceInformation =
+rhs.getResourceInformation(rName);
+int diff = lhsResourceInformation.compareTo(rhsResourceInformation);
+if (diff >= 1) {
+  lhsGreater = true;
+} else if (diff <= -1) {
+  rhs

[42/50] [abbrv] hadoop git commit: YARN-4830. Add support for resource types in the nodemanager. Contributed by Varun Vasudev.

2017-01-27 Thread vvasudev
YARN-4830. Add support for resource types in the nodemanager. Contributed by 
Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d702592
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d702592
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d702592

Branch: refs/heads/YARN-3926
Commit: 1d702592516b43e1bbed4fdaf9d481b853d4eff0
Parents: 6ed968a
Author: Varun Vasudev 
Authored: Sat Jun 11 14:33:46 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:23:36 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |   3 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  19 ++-
 .../FileSystemBasedConfigurationProvider.java   |   3 +-
 .../hadoop/yarn/LocalConfigurationProvider.java |   3 +-
 .../api/records/impl/pb/ResourcePBImpl.java |  53 +++---
 .../yarn/util/resource/ResourceUtils.java   | 168 +++
 .../yarn/util/resource/TestResourceUtils.java   |  29 +++-
 .../resource-types/node-resources-1.xml |  29 
 .../resource-types/node-resources-2.xml |  39 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../util/NodeManagerHardwareUtils.java  |  52 ++
 .../resourcemanager/ResourceTrackerService.java |   9 +-
 12 files changed, 342 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d702592/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 3efccce..3880bb0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -272,7 +272,8 @@ public abstract class Resource implements 
Comparable {
 continue;
   }
   if (entry.getKey().equals(ResourceInformation.VCORES.getName())
-  && entry.getValue().getUnits().equals("")) {
+  && entry.getValue().getUnits()
+  .equals(ResourceInformation.VCORES.getUnits())) {
 continue;
   }
   sb.append(", ").append(entry.getKey()).append(": ")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d702592/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e411659..7959d31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -63,6 +63,10 @@ public class YarnConfiguration extends Configuration {
   "resource-types.xml";
 
   @Private
+  public static final String NODE_RESOURCES_CONFIGURATION_FILE =
+  "node-resources.xml";
+
+  @Private
   public static final List RM_CONFIGURATION_FILES =
   Collections.unmodifiableList(Arrays.asList(
   RESOURCE_TYPES_CONFIGURATION_FILE,
@@ -72,6 +76,16 @@ public class YarnConfiguration extends Configuration {
   YARN_SITE_CONFIGURATION_FILE,
   CORE_SITE_CONFIGURATION_FILE));
 
+  @Private
+  public static final List NM_CONFIGURATION_FILES =
+  Collections.unmodifiableList(Arrays.asList(
+  NODE_RESOURCES_CONFIGURATION_FILE,
+  DR_CONFIGURATION_FILE,
+  CS_CONFIGURATION_FILE,
+  HADOOP_POLICY_CONFIGURATION_FILE,
+  YARN_SITE_CONFIGURATION_FILE,
+  CORE_SITE_CONFIGURATION_FILE));
+
   @Evolving
   public static final int APPLICATION_MAX_TAGS = 10;
 
@@ -100,12 +114,15 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_PREFIX = "yarn.";
 
   /
-  // Scheduler resource types configs
+  // Resource types configs
   
 
   public static final String RESOURCE_TYPES =
   YarnConfiguration.YARN_PREFIX + "resource-types";
 
+  public static final String NM_RESOURCES_PREFIX =
+  YarnConfiguration.NM_PREFIX + "resource-type.";
+
   /** Delay before deleting resource to ease debugging of NM issues */
   public static final String DEBUG_NM

[44/50] [abbrv] hadoop git commit: YARN-4715. Add support to read resource types from a config file. Contributed by Varun Vasudev.

2017-01-27 Thread vvasudev
YARN-4715. Add support to read resource types from a config file. Contributed 
by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/285fce58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/285fce58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/285fce58

Branch: refs/heads/YARN-3926
Commit: 285fce5837f3e442d6ab7b04e7fcb461f8099652
Parents: a8bd30d
Author: Varun Vasudev 
Authored: Fri Mar 11 15:03:15 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:23:36 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |  24 +-
 .../yarn/api/records/ResourceInformation.java   |   8 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../exceptions/ResourceNotFoundException.java   |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   8 +
 .../api/records/impl/pb/ResourcePBImpl.java |  72 ++
 .../resource/DominantResourceCalculator.java|   5 +-
 .../yarn/util/resource/ResourceUtils.java   | 229 +
 .../hadoop/yarn/util/resource/Resources.java|  18 +-
 .../src/main/resources/yarn-default.xml |  10 +
 .../yarn/util/resource/TestResourceUtils.java   | 248 +++
 .../resource-types/resource-types-1.xml |  18 ++
 .../resource-types/resource-types-2.xml |  29 +++
 .../resource-types/resource-types-3.xml |  24 ++
 .../resource-types/resource-types-4.xml |  34 +++
 .../resource-types/resource-types-error-1.xml   |  29 +++
 .../resource-types/resource-types-error-2.xml   |  29 +++
 .../resource-types/resource-types-error-3.xml   |  29 +++
 .../resource-types/resource-types-error-4.xml   |  24 ++
 19 files changed, 762 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/285fce58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index bbf61aa..3efccce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -74,15 +74,6 @@ public abstract class Resource implements 
Comparable {
 return resource;
   }
 
-  @Public
-  @Stable
-  public static Resource newInstance(
-  Map resources) {
-Resource resource = Records.newRecord(Resource.class);
-resource.setResources(resources);
-return resource;
-  }
-
   /**
* This method is DEPRECATED:
* Use {@link Resource#getMemorySize()} instead
@@ -207,15 +198,6 @@ public abstract class Resource implements 
Comparable {
   public abstract Long getResourceValue(String resource) throws YarnException;
 
   /**
-   * Set the resources to the map specified.
-   *
-   * @param resources Desired resources
-   */
-  @Public
-  @Evolving
-  public abstract void setResources(Map 
resources);
-
-  /**
* Set the ResourceInformation object for a particular resource.
*
* @param resource the resource for which the ResourceInformation is provided
@@ -249,8 +231,8 @@ public abstract class Resource implements 
Comparable {
 result = prime * result + getVirtualCores();
 for (Map.Entry entry : getResources()
 .entrySet()) {
-  if (entry.getKey().equals(ResourceInformation.MEMORY.getName()) || entry
-  .getKey().equals(ResourceInformation.VCORES.getName())) {
+  if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+  || entry.getKey().equals(ResourceInformation.VCORES.getName())) {
 continue;
   }
   result = prime * result + entry.getValue().hashCode();
@@ -284,7 +266,7 @@ public abstract class Resource implements 
Comparable {
 .append(getVirtualCores());
 for (Map.Entry entry : getResources()
 .entrySet()) {
-  if (entry.getKey().equals(ResourceInformation.MEMORY.getName())
+  if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
   && entry.getValue().getUnits()
   .equals(ResourceInformation.MEMORY_MB.getUnits())) {
 continue;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/285fce58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceI

[43/50] [abbrv] hadoop git commit: YARN-5707. Add manager class for resource profiles. Contributed by Varun Vasudev.

2017-01-27 Thread vvasudev
YARN-5707. Add manager class for resource profiles. Contributed by Varun 
Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac75a134
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac75a134
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac75a134

Branch: refs/heads/YARN-3926
Commit: ac75a134d0b27a273fb7aba996c58c79988b845e
Parents: 3d23f94
Author: Varun Vasudev 
Authored: Sat Oct 8 19:43:33 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:23:36 2017 +0530

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  23 +++
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../src/main/resources/yarn-default.xml |  16 ++
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   5 +
 .../resource/ResourceProfilesManager.java   |  46 +
 .../resource/ResourceProfilesManagerImpl.java   | 176 +++
 .../resource/TestResourceProfiles.java  | 142 +++
 .../resources/profiles/illegal-profiles-1.json  |  10 ++
 .../resources/profiles/illegal-profiles-2.json  |  10 ++
 .../resources/profiles/illegal-profiles-3.json  |  10 ++
 .../resources/profiles/sample-profiles-1.json   |  14 ++
 .../resources/profiles/sample-profiles-2.json   |  26 +++
 12 files changed, 482 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac75a134/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7959d31..a9e929f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -814,6 +814,29 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser.";
 
   /**
+   * Enable/disable resource profiles.
+   */
+  @Public
+  @Unstable
+  public static final String RM_RESOURCE_PROFILES_ENABLED =
+  RM_PREFIX + "resource-profiles.enabled";
+  @Public
+  @Unstable
+  public static final boolean DEFAULT_RM_RESOURCE_PROFILES_ENABLED = false;
+
+  /**
+   * File containing resource profiles.
+   */
+  @Public
+  @Unstable
+  public static final String RM_RESOURCE_PROFILES_SOURCE_FILE =
+  RM_PREFIX + "resource-profiles.source-file";
+  @Public
+  @Unstable
+  public static final String DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE =
+  "resource-profiles.json";
+
+  /**
* Timeout in seconds for YARN node graceful decommission.
* This is the maximal time to wait for running containers and applications
* to complete before transition a DECOMMISSIONING node into DECOMMISSIONED.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac75a134/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 3da4bab..4427390 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -84,6 +84,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 // Used as Java command line properties, not XML
 configurationPrefixToSkipCompare.add("yarn.app.container");
 
+// Ignore default file name for resource profiles
+configurationPropsToSkipCompare
+.add(YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE);
+
 // Ignore NodeManager "work in progress" variables
 configurationPrefixToSkipCompare
 .add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac75a134/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-

[48/50] [abbrv] hadoop git commit: YARN-5708. Implement APIs to get resource profiles from the RM. Contributed by Varun Vasudev.

2017-01-27 Thread vvasudev
YARN-5708. Implement APIs to get resource profiles from the RM. Contributed by 
Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/762de163
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/762de163
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/762de163

Branch: refs/heads/YARN-3926
Commit: 762de1638c0a7b5e0fb4271484b692a44b95a733
Parents: ac75a13
Author: Varun Vasudev 
Authored: Sat Oct 22 20:15:47 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:46:19 2017 +0530

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  13 ++
 .../hadoop/mapred/TestClientRedirect.java   |  17 +++
 .../yarn/api/ApplicationClientProtocol.java |  37 +
 .../GetAllResourceProfilesRequest.java  |  35 +
 .../GetAllResourceProfilesResponse.java |  60 
 .../GetResourceProfileRequest.java  |  59 
 .../GetResourceProfileResponse.java |  68 +
 .../yarn/api/records/ProfileCapability.java |  88 
 .../main/proto/applicationclient_protocol.proto |   2 +
 .../src/main/proto/yarn_protos.proto|  15 ++
 .../src/main/proto/yarn_service_protos.proto|  16 +++
 .../hadoop/yarn/client/api/YarnClient.java  |  25 
 .../yarn/client/api/impl/YarnClientImpl.java|  19 +++
 .../ApplicationClientProtocolPBClientImpl.java  |  36 +
 .../ApplicationClientProtocolPBServiceImpl.java |  42 ++
 .../pb/GetAllResourceProfilesRequestPBImpl.java |  55 +++
 .../GetAllResourceProfilesResponsePBImpl.java   | 142 +++
 .../pb/GetResourceProfileRequestPBImpl.java | 101 +
 .../pb/GetResourceProfileResponsePBImpl.java| 112 +++
 .../impl/pb/ProfileCapabilityPBImpl.java| 134 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |  34 +
 .../amrmproxy/MockResourceManagerFacade.java|  16 +++
 .../server/resourcemanager/ClientRMService.java |  41 ++
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  14 ++
 .../server/resourcemanager/ResourceManager.java |   8 ++
 26 files changed, 1194 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/762de163/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index c302553..6ed0127 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -517,4 +518,16 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 client.killApplication(appId, diagnostics);
   }
+
+  @Override
+  public Map getResourceProfiles()
+  throws YarnException, IOException {
+return client.getResourceProfiles();
+  }
+
+  @Override
+  public Resource getResourceProfile(String profile)
+  throws YarnException, IOException {
+return client.getResourceProfile(profile);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/762de163/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 65eac65..cc50be0 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/sr

[33/50] [abbrv] hadoop git commit: YARN-6025. Fix synchronization issues of AbstractYarnScheduler#nodeUpdate and its implementations. (Naganarasimha G R via wangda)

2017-01-27 Thread vvasudev
YARN-6025. Fix synchronization issues of AbstractYarnScheduler#nodeUpdate and 
its implementations. (Naganarasimha G R via wangda)

(cherry picked from commit e0f2379312c48e26b0cb2c1e1e803ef71d1839cf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f69a107a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f69a107a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f69a107a

Branch: refs/heads/YARN-3926
Commit: f69a107aeccc68ca1085a7be8093d36b2f45eaa1
Parents: ab1faa4
Author: Wangda Tan 
Authored: Tue Jan 3 14:46:24 2017 -0800
Committer: Wangda Tan 
Committed: Tue Jan 3 14:53:36 2017 -0800

--
 .../resourcemanager/scheduler/AbstractYarnScheduler.java | 11 ++-
 .../scheduler/capacity/CapacityScheduler.java|  4 ++--
 .../resourcemanager/scheduler/fair/FairScheduler.java|  4 ++--
 .../resourcemanager/scheduler/fifo/FifoScheduler.java|  9 -
 4 files changed, 18 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f69a107a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index c0cc6b0..c1a985d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -36,6 +36,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.AbstractResourceRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
@@ -51,7 +52,6 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
-import org.apache.hadoop.yarn.api.records.AbstractResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -77,14 +77,15 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReco
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeFinishedContainersPulledByAMEvent;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.SettableFuture;
 
@@ -395,8 +396,8 @@ public abstract class AbstractYarnScheduler
 }
   }
 
-  public synchronized void recoverContainersOnNode(
-  List containerReports, RMNode nm) {
+  public void recoverContainersOnNode(List containerReports,
+  RMNode nm) {
 try {
   writeLock.lock();
   if (!rmContext.isWorkPreservingRecoveryEnabled()
@@ -993,7 +994,7 @@ public abstract class AbstractYarnScheduler
* Process a heartbeat update from a node.
* @param nm The RMNode corresponding to the NodeManager
*/
-  protected synchronized void nodeUp

[29/50] [abbrv] hadoop git commit: HADOOP-13922. Some modules have dependencies on hadoop-client jar removed by HADOOP-11804. Contributed by Sean Busbey.

2017-01-27 Thread vvasudev
HADOOP-13922. Some modules have dependencies on hadoop-client jar removed by 
HADOOP-11804. Contributed by Sean Busbey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebdd2e03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebdd2e03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebdd2e03

Branch: refs/heads/YARN-3926
Commit: ebdd2e03b7b8573cc3531958dbfda72cdbc277fd
Parents: 451efb0
Author: Chris Nauroth 
Authored: Tue Jan 3 13:04:50 2017 -0800
Committer: Chris Nauroth 
Committed: Tue Jan 3 13:16:06 2017 -0800

--
 hadoop-client-modules/hadoop-client/pom.xml | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebdd2e03/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index 0394cae..cc527bc 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -23,7 +23,6 @@
 
   hadoop-client
   3.0.0-alpha2-SNAPSHOT
-  pom
 
   Apache Hadoop Client aggregation pom with dependencies 
exposed
   Apache Hadoop Client Aggregator


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HDFS-11274. Datanode should only check the failed volume upon IO errors. Contributed by Xiaoyu Yao.

2017-01-27 Thread vvasudev
HDFS-11274. Datanode should only check the failed volume upon IO errors. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/603f3ef1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/603f3ef1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/603f3ef1

Branch: refs/heads/YARN-3926
Commit: 603f3ef1386048111940b66f3a0750ab84d0588f
Parents: ce3613c
Author: Xiaoyu Yao 
Authored: Wed Dec 28 22:08:13 2016 -0800
Committer: Arpit Agarwal 
Committed: Wed Dec 28 22:08:13 2016 -0800

--
 .../hdfs/server/datanode/BlockReceiver.java |  12 +-
 .../server/datanode/CountingFileIoEvents.java   |   3 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  91 -
 .../server/datanode/DefaultFileIoEvents.java|   2 +-
 .../hdfs/server/datanode/FileIoEvents.java  |  36 --
 .../hdfs/server/datanode/FileIoProvider.java|  89 +++--
 .../server/datanode/ProfilingFileIoEvents.java  |   2 +-
 .../hdfs/server/datanode/ReplicaInfo.java   |   2 +-
 .../server/datanode/checker/AsyncChecker.java   |   5 +-
 .../datanode/checker/DatasetVolumeChecker.java  |  71 ++
 .../checker/StorageLocationChecker.java |   8 +-
 .../datanode/checker/ThrottledAsyncChecker.java |  19 ++-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   5 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   2 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   3 +-
 .../fsdataset/impl/FsVolumeImplBuilder.java |   4 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|   2 +-
 .../TestDataNodeVolumeFailureReporting.java |  15 ++-
 .../checker/TestDatasetVolumeChecker.java   |  49 ---
 .../TestDatasetVolumeCheckerFailures.java   |  23 
 .../checker/TestThrottledAsyncChecker.java  | 128 +++
 22 files changed, 338 insertions(+), 235 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/603f3ef1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index b3aee11..567597d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -278,10 +278,9 @@ class BlockReceiver implements Closeable {
   IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
   DataNode.LOG.warn("IOException in BlockReceiver constructor"
   + (cause == null ? "" : ". Cause is "), cause);
-  
-  if (cause != null) { // possible disk error
+  if (cause != null) {
 ioe = cause;
-datanode.checkDiskErrorAsync();
+// Volume error check moved to FileIoProvider
   }
   
   throw ioe;
@@ -363,9 +362,8 @@ class BlockReceiver implements Closeable {
 if (measuredFlushTime) {
   datanode.metrics.addFlushNanos(flushTotalNanos);
 }
-// disk check
 if(ioe != null) {
-  datanode.checkDiskErrorAsync();
+  // Volume error check moved to FileIoProvider
   throw ioe;
 }
   }
@@ -792,7 +790,7 @@ class BlockReceiver implements Closeable {
   manageWriterOsCache(offsetInBlock);
 }
   } catch (IOException iex) {
-datanode.checkDiskErrorAsync();
+// Volume error check moved to FileIoProvider
 throw iex;
   }
 }
@@ -1430,7 +1428,7 @@ class BlockReceiver implements Closeable {
 } catch (IOException e) {
   LOG.warn("IOException in BlockReceiver.run(): ", e);
   if (running) {
-datanode.checkDiskErrorAsync();
+// Volume error check moved to FileIoProvider
 LOG.info(myString, e);
 running = false;
 if (!Thread.interrupted()) { // failure not caused by interruption

http://git-wip-us.apache.org/repos/asf/hadoop/blob/603f3ef1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/CountingFileIoEvents.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/CountingFileIoEvents.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/CountingFileIoEvents.java
index a70c151..7c6bfd6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/CountingFi

[50/50] [abbrv] hadoop git commit: YARN-5587. Add support for resource profiles. (vvasudev via asuresh)

2017-01-27 Thread vvasudev
YARN-5587. Add support for resource profiles. (vvasudev via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4dcd291
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4dcd291
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4dcd291

Branch: refs/heads/YARN-3926
Commit: b4dcd29166953b66de147b200c62d8c1a819a2af
Parents: 762de16
Author: Arun Suresh 
Authored: Tue Nov 15 01:01:07 2016 -0800
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:58:10 2017 +0530

--
 .../dev-support/findbugs-exclude.xml|   4 +
 .../RegisterApplicationMasterResponse.java  |   8 +
 .../yarn/api/records/ProfileCapability.java |  94 ++-
 .../hadoop/yarn/api/records/Resource.java   |  14 ++
 .../yarn/api/records/ResourceInformation.java   |  57 ++-
 .../yarn/api/records/ResourceRequest.java   |  43 -
 .../hadoop-yarn/hadoop-yarn-client/pom.xml  |   1 +
 .../hadoop/yarn/client/api/AMRMClient.java  | 117 +-
 .../yarn/client/api/impl/AMRMClientImpl.java| 152 ++---
 .../client/api/impl/RemoteRequestsTable.java| 109 +
 .../yarn/client/api/impl/TestAMRMClient.java| 147 +++--
 .../impl/TestAMRMClientContainerRequest.java|   8 +-
 .../api/impl/TestDistributedScheduling.java |  12 +-
 .../yarn/client/api/impl/TestNMClient.java  |   5 +-
 .../TestOpportunisticContainerAllocation.java   |  15 +-
 .../src/test/resources/resource-profiles.json   |  18 +++
 ...RegisterApplicationMasterResponsePBImpl.java |  58 +++
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../records/impl/pb/ResourceRequestPBImpl.java  |  41 -
 .../yarn/util/resource/ResourceUtils.java   | 161 ++-
 .../hadoop/yarn/util/resource/Resources.java|  10 +-
 .../ApplicationMasterService.java   |   9 ++
 .../server/resourcemanager/RMServerUtils.java   |  50 ++
 .../resource/ResourceProfilesManagerImpl.java   |   4 +
 .../scheduler/AbstractYarnScheduler.java|  44 +
 .../scheduler/ClusterNodeTracker.java   |   3 +-
 .../scheduler/SchedulerUtils.java   |  10 ++
 .../scheduler/capacity/CapacityScheduler.java   |   4 +-
 .../scheduler/fair/FairScheduler.java   |   4 +-
 .../scheduler/fifo/FifoScheduler.java   |  13 +-
 .../yarn/server/resourcemanager/MockRM.java |   2 +
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../TestApplicationMasterService.java   |  35 
 .../scheduler/fair/TestFairScheduler.java   |   4 +
 .../hadoop/yarn/server/MiniYARNCluster.java |   2 +
 35 files changed, 1099 insertions(+), 164 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4dcd291/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index ab36a4e..de58251 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -151,6 +151,10 @@
 
   
   
+
+
+  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4dcd291/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
index 0b886dd..8fa8563 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
@@ -204,4 +204,12 @@ public abstract class RegisterApplicationMasterResponse {
   @Unstable
   public abstract void setSchedulerResourceTypes(
   EnumSet types);
+
+  @Public
+  @Unstable
+  public abstract Map getResourceProfiles();
+
+  @Private
+  @Unstable
+  public abstract void setResourceProfiles(Map profiles);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4dcd291/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
--
diff --git 
a/hadoop-yarn-projec

[39/50] [abbrv] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-01-27 Thread vvasudev
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09f27e5b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
new file mode 100644
index 000..44880df
--- /dev/null
+++ b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,1661 @@
+
+ Apache License
+   Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+  "License" shall mean the terms and conditions for use, reproduction,
+  and distribution as defined by Sections 1 through 9 of this document.
+
+  "Licensor" shall mean the copyright owner or entity authorized by
+  the copyright owner that is granting the License.
+
+  "Legal Entity" shall mean the union of the acting entity and all
+  other entities that control, are controlled by, or are under common
+  control with that entity. For the purposes of this definition,
+  "control" means (i) the power, direct or indirect, to cause the
+  direction or management of such entity, whether by contract or
+  otherwise, or (ii) ownership of fifty percent (50%) or more of the
+  outstanding shares, or (iii) beneficial ownership of such entity.
+
+  "You" (or "Your") shall mean an individual or Legal Entity
+  exercising permissions granted by this License.
+
+  "Source" form shall mean the preferred form for making modifications,
+  including but not limited to software source code, documentation
+  source, and configuration files.
+
+  "Object" form shall mean any form resulting from mechanical
+  transformation or translation of a Source form, including but
+  not limited to compiled object code, generated documentation,
+  and conversions to other media types.
+
+  "Work" shall mean the work of authorship, whether in Source or
+  Object form, made available under the License, as indicated by a
+  copyright notice that is included in or attached to the work
+  (an example is provided in the Appendix below).
+
+  "Derivative Works" shall mean any work, whether in Source or Object
+  form, that is based on (or derived from) the Work and for which the
+  editorial revisions, annotations, elaborations, or other modifications
+  represent, as a whole, an original work of authorship. For the purposes
+  of this License, Derivative Works shall not include works that remain
+  separable from, or merely link (or bind by name) to the interfaces of,
+  the Work and Derivative Works thereof.
+
+  "Contribution" shall mean any work of authorship, including
+  the original version of the Work and any modifications or additions
+  to that Work or Derivative Works thereof, that is intentionally
+  submitted to Licensor for inclusion in the Work by the copyright owner
+  or by an individual or Legal Entity authorized to submit on behalf of
+  the copyright owner. For the purposes of this definition, "submitted"
+  means any form of electronic, verbal, or written communication sent
+  to the Licensor or its representatives, including but not limited to
+  communication on electronic mailing lists, source code control systems,
+  and issue tracking systems that are managed by, or on behalf of, the
+  Licensor for the purpose of discussing and improving the Work, but
+  excluding communication that is conspicuously marked or otherwise
+  designated in writing by the copyright owner as "Not a Contribution."
+
+  "Contributor" shall mean Licensor and any individual or Legal Entity
+  on behalf of whom a Contribution has been received by Licensor and
+  subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  copyright license to reproduce, prepare Derivative Works of,
+  publicly display, publicly perform, sublicense, and distribute the
+  Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  (except as stated in this section) patent license to make, have made,
+  use, offer to sell, sell, import, and otherwise transfer the Work,
+  where such license applies only to those patent claims licensable
+  by such Contributor that are necessarily infringed by their
+  Contribution(s) alone or by c

[46/50] [abbrv] hadoop git commit: YARN-5242. Update DominantResourceCalculator to consider all resource types in calculations. Contributed by Varun Vasudev.

2017-01-27 Thread vvasudev
YARN-5242. Update DominantResourceCalculator to consider all resource types in 
calculations. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39d2350b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39d2350b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39d2350b

Branch: refs/heads/YARN-3926
Commit: 39d2350b6dc578c33e1dab6fbb1eb44715d275c6
Parents: 1d70259
Author: Rohith Sharma K S 
Authored: Tue Jul 26 14:13:03 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:23:36 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |  7 ++
 .../api/records/impl/pb/ResourcePBImpl.java |  2 +-
 .../resource/DominantResourceCalculator.java| 23 
 .../yarn/util/resource/ResourceUtils.java   |  5 +++--
 .../hadoop/yarn/util/resource/Resources.java|  6 +
 5 files changed, 31 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39d2350b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 3880bb0..4bbca5a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -291,6 +291,8 @@ public abstract class Resource implements 
Comparable {
 otherResources = other.getResources();
 long diff = thisResources.size() - otherResources.size();
 if (diff == 0) {
+  // compare memory and vcores first(in that order) to preserve
+  // existing behaviour
   if (thisResources.keySet().equals(otherResources.keySet())) {
 diff = this.getMemorySize() - other.getMemorySize();
 if (diff == 0) {
@@ -299,6 +301,11 @@ public abstract class Resource implements 
Comparable {
 if (diff == 0) {
   for (Map.Entry entry : thisResources
   .entrySet()) {
+if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+|| entry.getKey()
+.equals(ResourceInformation.VCORES.getName())) {
+  continue;
+}
 diff =
 entry.getValue().compareTo(otherResources.get(entry.getKey()));
 if (diff != 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39d2350b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 8cf7291..36e5beb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -228,7 +228,7 @@ public class ResourcePBImpl extends Resource {
 builder.addResourceValueMap(e);
   }
 }
-builder.setMemory(this.getMemory());
+builder.setMemory(this.getMemorySize());
 builder.setVirtualCores(this.getVirtualCores());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39d2350b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 6dbe754..6c5406b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -387,9 +387,24 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster,
-  

[41/50] [abbrv] hadoop git commit: YARN-4829. Add support for binary units in Resource class.(vvasudev via asuresh)

2017-01-27 Thread vvasudev
YARN-4829. Add support for binary units in Resource class.(vvasudev via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ed968a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ed968a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ed968a1

Branch: refs/heads/YARN-3926
Commit: 6ed968a1fd89bab240b60af56b4d72a9b2e725b5
Parents: 285fce5
Author: Arun Suresh 
Authored: Thu Mar 17 23:50:22 2016 -0700
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:23:36 2017 +0530

--
 .../yarn/api/records/ResourceInformation.java   |  2 +-
 .../hadoop/yarn/util/UnitsConversionUtil.java   | 45 ++--
 .../yarn/util/TestUnitsConversionUtil.java  | 17 +++-
 .../api/records/impl/pb/ResourcePBImpl.java |  2 +-
 .../yarn/util/resource/TestResourceUtils.java   |  2 +-
 5 files changed, 52 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed968a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 80e3192..a17e81b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -36,7 +36,7 @@ public class ResourceInformation implements 
Comparable {
   private static final String VCORES_URI = "vcores";
 
   public static final ResourceInformation MEMORY_MB =
-  ResourceInformation.newInstance(MEMORY_URI, "M");
+  ResourceInformation.newInstance(MEMORY_URI, "Mi");
   public static final ResourceInformation VCORES =
   ResourceInformation.newInstance(VCORES_URI);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed968a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
index 7785263..47bb3df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
@@ -46,7 +46,8 @@ public class UnitsConversionUtil {
   }
 
   private static final String[] UNITS =
-  {"p", "n", "u", "m", "", "k", "M", "G", "T", "P"};
+  { "p", "n", "u", "m", "", "k", "M", "G", "T", "P", "Ki", "Mi", "Gi", 
"Ti",
+  "Pi" };
   private static final List SORTED_UNITS = Arrays.asList(UNITS);
   public static final Set KNOWN_UNITS = createKnownUnitsSet();
   private static final Converter PICO =
@@ -65,6 +66,15 @@ public class UnitsConversionUtil {
   private static final Converter PETA =
   new Converter(1000L * 1000L * 1000L * 1000L * 1000L, 1L);
 
+  private static final Converter KILO_BINARY = new Converter(1024L, 1L);
+  private static final Converter MEGA_BINARY = new Converter(1024L * 1024L, 
1L);
+  private static final Converter GIGA_BINARY =
+  new Converter(1024L * 1024L * 1024L, 1L);
+  private static final Converter TERA_BINARY =
+  new Converter(1024L * 1024L * 1024L * 1024L, 1L);
+  private static final Converter PETA_BINARY =
+  new Converter(1024L * 1024L * 1024L * 1024L * 1024L, 1L);
+
   private static Set createKnownUnitsSet() {
 Set ret = new HashSet<>();
 ret.addAll(Arrays.asList(UNITS));
@@ -93,6 +103,16 @@ public class UnitsConversionUtil {
   return TERA;
 case "P":
   return PETA;
+case "Ki":
+  return KILO_BINARY;
+case "Mi":
+  return MEGA_BINARY;
+case "Gi":
+  return GIGA_BINARY;
+case "Ti":
+  return TERA_BINARY;
+case "Pi":
+  return PETA_BINARY;
 default:
   throw new IllegalArgumentException(
   "Unknown unit '" + unit + "'. Known units are " + KNOWN_UNITS);
@@ -112,28 +132,29 @@ public class UnitsConversionUtil {
 if (toUnit == null || fromUnit == null || fromValue == null) {
   throw new IllegalArgumentException("One or more arguments are null");
 }
-Long tmp;
 String overflowMsg =
 "Converting " + fromValue + " from '" + f

[49/50] [abbrv] hadoop git commit: YARN-5587. Add support for resource profiles. (vvasudev via asuresh)

2017-01-27 Thread vvasudev
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4dcd291/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
index 9890296..39d772a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
@@ -23,8 +23,10 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ProfileCapability;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.proto.YarnProtos.ProfileCapabilityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
@@ -40,6 +42,7 @@ public class ResourceRequestPBImpl extends  ResourceRequest {
   private Priority priority = null;
   private Resource capability = null;
   private ExecutionTypeRequest executionTypeRequest = null;
+  private ProfileCapability profile = null;
   
   
   public ResourceRequestPBImpl() {
@@ -52,7 +55,7 @@ public class ResourceRequestPBImpl extends  ResourceRequest {
   }
   
   public ResourceRequestProto getProto() {
-  mergeLocalToProto();
+mergeLocalToProto();
 proto = viaProto ? proto : builder.build();
 viaProto = true;
 return proto;
@@ -69,6 +72,9 @@ public class ResourceRequestPBImpl extends  ResourceRequest {
   builder.setExecutionTypeRequest(
   ProtoUtils.convertToProtoFormat(this.executionTypeRequest));
 }
+if (this.profile != null) {
+  builder.setProfile(converToProtoFormat(this.profile));
+}
   }
 
   private void mergeLocalToProto() {
@@ -229,7 +235,8 @@ public class ResourceRequestPBImpl extends  ResourceRequest 
{
 + ", Location: " + getResourceName()
 + ", Relax Locality: " + getRelaxLocality()
 + ", Execution Type Request: " + getExecutionTypeRequest()
-+ ", Node Label Expression: " + getNodeLabelExpression() + "}";
++ ", Node Label Expression: " + getNodeLabelExpression()
++ ", Resource Profile: " + getProfileCapability() + "}";
   }
 
   @Override
@@ -250,4 +257,34 @@ public class ResourceRequestPBImpl extends  
ResourceRequest {
 }
 builder.setNodeLabelExpression(nodeLabelExpression);
   }
+
+  @Override
+  public void setProfileCapability(ProfileCapability profileCapability) {
+maybeInitBuilder();
+if (profile == null) {
+  builder.clearProfile();
+}
+this.profile = profileCapability;
+  }
+
+  @Override
+  public ProfileCapability getProfileCapability() {
+if (profile != null) {
+  return profile;
+}
+ResourceRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasProfile()) {
+  return null;
+}
+return new ProfileCapabilityPBImpl(p.getProfile());
+  }
+
+  private ProfileCapabilityProto converToProtoFormat(
+  ProfileCapability profileCapability) {
+ProfileCapabilityPBImpl tmp = new ProfileCapabilityPBImpl();
+tmp.setProfileName(profileCapability.getProfileName());
+tmp.setProfileCapabilityOverride(
+profileCapability.getProfileCapabilityOverride());
+return tmp.getProto();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4dcd291/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 938e462..86cf872 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache

[26/50] [abbrv] hadoop git commit: HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang.

2017-01-27 Thread vvasudev
HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed 
by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fcc73fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fcc73fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fcc73fc

Branch: refs/heads/YARN-3926
Commit: 7fcc73fc0d248aae1edbd4e1514c5818f6198928
Parents: b31e195
Author: Andrew Wang 
Authored: Tue Jan 3 09:58:00 2017 -0800
Committer: Andrew Wang 
Committed: Tue Jan 3 09:58:00 2017 -0800

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  53 
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  62 -
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  12 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |  16 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  33 +++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 192 -
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 276 +++
 7 files changed, 638 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fcc73fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 246f242..4204c54 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -22,6 +22,7 @@ import com.fasterxml.jackson.databind.ObjectReader;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
@@ -637,4 +638,56 @@ class JsonUtilClient {
 }
   }
 
+  static BlockLocation[] toBlockLocationArray(Map json)
+  throws IOException{
+final Map rootmap =
+(Map)json.get(BlockLocation.class.getSimpleName() + "s");
+final List array = JsonUtilClient.getList(rootmap,
+BlockLocation.class.getSimpleName());
+
+Preconditions.checkNotNull(array);
+final BlockLocation[] locations = new BlockLocation[array.size()];
+int i = 0;
+for (Object object : array) {
+  final Map m = (Map) object;
+  locations[i++] = JsonUtilClient.toBlockLocation(m);
+}
+return locations;
+  }
+
+  /** Convert a Json map to BlockLocation. **/
+  static BlockLocation toBlockLocation(Map m)
+  throws IOException{
+if(m == null) {
+  return null;
+}
+
+long length = ((Number) m.get("length")).longValue();
+long offset = ((Number) m.get("offset")).longValue();
+boolean corrupt = Boolean.
+getBoolean(m.get("corrupt").toString());
+String[] storageIds = toStringArray(getList(m, "storageIds"));
+String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
+String[] hosts = toStringArray(getList(m, "hosts"));
+String[] names = toStringArray(getList(m, "names"));
+String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
+StorageType[] storageTypes = toStorageTypeArray(
+getList(m, "storageTypes"));
+return new BlockLocation(names, hosts, cachedHosts,
+topologyPaths, storageIds, storageTypes,
+offset, length, corrupt);
+  }
+
+  static String[] toStringArray(List list) {
+if (list == null) {
+  return null;
+} else {
+  final String[] array = new String[list.size()];
+  int i = 0;
+  for (Object object : list) {
+array[i++] = object.toString();
+  }
+  return array;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fcc73fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 26cfc01..d4fa009 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1611,14 +1611,68 @@ public class WebHdfsFileSystem extends FileSystem
   final long offset, final long length) thro

[36/50] [abbrv] hadoop git commit: HADOOP-12733. Remove references to obsolete io.seqfile configuration variables. Contributed by Ray Chiang.

2017-01-27 Thread vvasudev
HADOOP-12733. Remove references to obsolete io.seqfile configuration variables. 
Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01d31fe9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01d31fe9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01d31fe9

Branch: refs/heads/YARN-3926
Commit: 01d31fe9389ccdc153d7f4bf6574bf8e509867c1
Parents: 87bb1c4
Author: Akira Ajisaka 
Authored: Wed Jan 4 14:10:36 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Jan 4 14:10:36 2017 +0900

--
 .../src/main/resources/core-default.xml | 16 
 .../hadoop/conf/TestCommonConfigurationFields.java  |  2 --
 .../test/resources/job_1329348432655_0001_conf.xml  |  2 --
 .../src/main/data/2jobs2min-rumen-jh.json   |  6 --
 4 files changed, 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b4a34db..ee2cc2e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1262,22 +1262,6 @@
   
 
 
-
-  io.seqfile.lazydecompress
-  true
-  Should values of block-compressed SequenceFiles be decompressed
-  only when necessary.
-  
-
-
-
-  io.seqfile.sorter.recordlimit
-  100
-  The limit on number of records to be kept in memory in a spill
-  in SequenceFiles.Sorter
-  
-
-
  
   io.mapfile.bloom.size
   1048576

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 571dfae..a3a4026 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -124,8 +124,6 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare.add("dr.who");
 
 // XML deprecated properties.
-xmlPropsToSkipCompare.add("io.seqfile.lazydecompress");
-xmlPropsToSkipCompare.add("io.seqfile.sorter.recordlimit");
 // - org.apache.hadoop.hdfs.client.HdfsClientConfigKeys
 xmlPropsToSkipCompare
 .add("io.bytes.per.checksum");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index e4619d6..4c73e8b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -16,7 +16,6 @@
 mapreduce.reduce.shuffle.memory.limit.percent0.25
 hadoop.http.authentication.kerberos.keytab${user.home}/hadoop.keytab
 yarn.nodemanager.keytab/etc/krb5.keytab
-io.seqfile.sorter.recordlimit100
 mapreduce.task.io.sort.factor10
 yarn.nodemanager.disk-health-checker.interval-ms12
 mapreduce.job.working.dirhdfs://localhost:8021/user/user
@@ -102,7 +101,6 @@
 dfs.client.block.write.retries3
 hadoop.proxyuser.user.groupsusers
 dfs.namenode.name.dir.restorefalse
-io.seqfile.lazydecompresstrue
 mapreduce.reduce.merge.inmem.threshold1000
 mapreduce.input.fileinputformat.split.minsize0
 dfs.replication3

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d31fe9/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
--
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json 
b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index 095cfd5..c252539 100644
--- a/hadoop-tools/hadoop-sls/src/mai

[31/50] [abbrv] hadoop git commit: Fix synchronization issues of AbstractYarnScheduler#nodeUpdate and its implementations. (Naganarasimha G R via wangda)

2017-01-27 Thread vvasudev
Fix synchronization issues of AbstractYarnScheduler#nodeUpdate and its 
implementations. (Naganarasimha G R via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0f23793
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0f23793
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0f23793

Branch: refs/heads/YARN-3926
Commit: e0f2379312c48e26b0cb2c1e1e803ef71d1839cf
Parents: 88731c7
Author: Wangda Tan 
Authored: Tue Jan 3 14:46:24 2017 -0800
Committer: Wangda Tan 
Committed: Tue Jan 3 14:46:41 2017 -0800

--
 .../resourcemanager/scheduler/AbstractYarnScheduler.java | 11 ++-
 .../scheduler/capacity/CapacityScheduler.java|  4 ++--
 .../resourcemanager/scheduler/fair/FairScheduler.java|  4 ++--
 .../resourcemanager/scheduler/fifo/FifoScheduler.java|  9 -
 4 files changed, 18 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0f23793/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index c0cc6b0..c1a985d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -36,6 +36,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.AbstractResourceRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
@@ -51,7 +52,6 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
-import org.apache.hadoop.yarn.api.records.AbstractResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -77,14 +77,15 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReco
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeFinishedContainersPulledByAMEvent;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.SettableFuture;
 
@@ -395,8 +396,8 @@ public abstract class AbstractYarnScheduler
 }
   }
 
-  public synchronized void recoverContainersOnNode(
-  List containerReports, RMNode nm) {
+  public void recoverContainersOnNode(List containerReports,
+  RMNode nm) {
 try {
   writeLock.lock();
   if (!rmContext.isWorkPreservingRecoveryEnabled()
@@ -993,7 +994,7 @@ public abstract class AbstractYarnScheduler
* Process a heartbeat update from a node.
* @param nm The RMNode corresponding to the NodeManager
*/
-  protected synchronized void nodeUpdate(RMNode nm) {
+  protected void nodeUpdate(RMNode nm) {
 if (LOG.isDebugE

[27/50] [abbrv] hadoop git commit: YARN-5529. Create new DiskValidator class with metrics (yufeigu via rkanter)

2017-01-27 Thread vvasudev
YARN-5529. Create new DiskValidator class with metrics (yufeigu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/591fb159
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/591fb159
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/591fb159

Branch: refs/heads/YARN-3926
Commit: 591fb159444037bf4cb651aa1228914f5d71e1bf
Parents: 7fcc73f
Author: Robert Kanter 
Authored: Tue Jan 3 12:13:32 2017 -0800
Committer: Robert Kanter 
Committed: Tue Jan 3 12:13:32 2017 -0800

--
 .../hadoop/metrics2/lib/MutableQuantiles.java   |  10 ++
 .../hadoop/util/DiskValidatorFactory.java   |   5 +-
 .../hadoop/util/ReadWriteDiskValidator.java |  95 +++
 .../util/ReadWriteDiskValidatorMetrics.java | 170 +++
 .../hadoop/metrics2/impl/MetricsRecords.java|  16 ++
 .../hadoop/util/TestReadWriteDiskValidator.java | 161 ++
 6 files changed, 456 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/591fb159/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
index 5b12370..cc32975 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
@@ -144,6 +144,16 @@ public class MutableQuantiles extends MutableMetric {
 scheduledTask = null;
   }
 
+  /**
+   * Get the quantile estimator.
+   *
+   * @return the quantile estimator
+   */
+  @VisibleForTesting
+  public synchronized QuantileEstimator getEstimator() {
+return estimator;
+  }
+
   public synchronized void setEstimator(QuantileEstimator quantileEstimator) {
 this.estimator = quantileEstimator;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/591fb159/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java
index 29ab2ad..7d04db2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskValidatorFactory.java
@@ -62,7 +62,8 @@ public final class DiskValidatorFactory {
 
   /**
* Returns {@link DiskValidator} instance corresponding to its name.
-   * The diskValidator parameter can be "basic" for {@link BasicDiskValidator}.
+   * The diskValidator parameter can be "basic" for {@link BasicDiskValidator}
+   * or "read-write" for {@link ReadWriteDiskValidator}.
* @param diskValidator canonical class name, for example, "basic"
* @throws DiskErrorException if the class cannot be located
*/
@@ -74,6 +75,8 @@ public final class DiskValidatorFactory {
 
 if (diskValidator.equalsIgnoreCase(BasicDiskValidator.NAME)) {
   clazz = BasicDiskValidator.class;
+} else if (diskValidator.equalsIgnoreCase(ReadWriteDiskValidator.NAME)) {
+  clazz = ReadWriteDiskValidator.class;
 } else {
   try {
 clazz = Class.forName(diskValidator);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/591fb159/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidator.java
new file mode 100644
index 000..d80bb45
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidator.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless req

[10/50] [abbrv] hadoop git commit: YARN-6001. Improve moveApplicationQueues command line. Contributed by Sunil G.

2017-01-27 Thread vvasudev
YARN-6001. Improve moveApplicationQueues command line. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e297be74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e297be74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e297be74

Branch: refs/heads/YARN-3926
Commit: e297be74ed4c4334bf48dd01d3b10e302147041d
Parents: 0840b43
Author: Rohith Sharma K S 
Authored: Wed Dec 28 12:16:11 2016 +0530
Committer: Rohith Sharma K S 
Committed: Wed Dec 28 12:16:11 2016 +0530

--
 .../hadoop/yarn/client/cli/ApplicationCLI.java  | 16 -
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 62 +++-
 2 files changed, 76 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e297be74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index efe5921..893348a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -88,6 +88,7 @@ public class ApplicationCLI extends YarnCLI {
   public static final String APP_ID = "appId";
   public static final String UPDATE_PRIORITY = "updatePriority";
   public static final String UPDATE_LIFETIME = "updateLifetime";
+  public static final String CHANGE_APPLICATION_QUEUE = "changeQueue";
 
   private boolean allAppStates;
 
@@ -114,7 +115,7 @@ public class ApplicationCLI extends YarnCLI {
   + "based on application state and -appTags to filter applications "
   + "based on application tag.");
   opts.addOption(MOVE_TO_QUEUE_CMD, true, "Moves the application to a "
-  + "different queue.");
+  + "different queue. Deprecated command. Use 'changeQueue' instead.");
   opts.addOption(QUEUE_CMD, true, "Works with the movetoqueue command to"
   + " specify which queue to move an application to.");
   opts.addOption(HELP_CMD, false, "Displays help for all commands.");
@@ -146,6 +147,11 @@ public class ApplicationCLI extends YarnCLI {
   opts.addOption(UPDATE_LIFETIME, true,
   "update timeout of an application from NOW. ApplicationId can be"
   + " passed using 'appId' option. Timeout value is in seconds.");
+  opts.addOption(CHANGE_APPLICATION_QUEUE, true,
+  "Moves application to a new queue. ApplicationId can be"
+  + " passed using 'appId' option. 'movetoqueue' command is"
+  + " deprecated, this new command 'changeQueue' performs same"
+  + " functionality.");
   Option killOpt = new Option(KILL_CMD, true, "Kills the application. "
   + "Set of applications can be provided separated with space");
   killOpt.setValueSeparator(' ');
@@ -158,6 +164,7 @@ public class ApplicationCLI extends YarnCLI {
   opts.getOption(APP_ID).setArgName("Application ID");
   opts.getOption(UPDATE_PRIORITY).setArgName("Priority");
   opts.getOption(UPDATE_LIFETIME).setArgName("Timeout");
+  opts.getOption(CHANGE_APPLICATION_QUEUE).setArgName("Queue Name");
 } else if (args.length > 0 && 
args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) {
   title = APPLICATION_ATTEMPT;
   opts.addOption(STATUS_CMD, true,
@@ -315,6 +322,13 @@ public class ApplicationCLI extends YarnCLI {
 
   updateApplicationTimeout(cliParser.getOptionValue(APP_ID),
   ApplicationTimeoutType.LIFETIME, timeoutInSec);
+} else if (cliParser.hasOption(CHANGE_APPLICATION_QUEUE)) {
+  if (!cliParser.hasOption(APP_ID)) {
+printUsage(title, opts);
+return exitCode;
+  }
+  moveApplicationAcrossQueues(cliParser.getOptionValue(APP_ID),
+  cliParser.getOptionValue(CHANGE_APPLICATION_QUEUE));
 } else if (cliParser.hasOption(SIGNAL_CMD)) {
   if (args.length < 3 || args.length > 4) {
 printUsage(title, opts);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e297be74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apach

[12/50] [abbrv] hadoop git commit: YARN-5719. Enforce a C standard for native container-executor. Contributed by Chris Douglas.

2017-01-27 Thread vvasudev
YARN-5719. Enforce a C standard for native container-executor. Contributed by 
Chris Douglas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/972da46c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/972da46c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/972da46c

Branch: refs/heads/YARN-3926
Commit: 972da46cb48725ad49d3e0a033742bd1a8228f51
Parents: f6715b2
Author: Varun Vasudev 
Authored: Wed Dec 28 14:59:57 2016 +0530
Committer: Varun Vasudev 
Committed: Wed Dec 28 14:59:57 2016 +0530

--
 .../src/CMakeLists.txt  | 16 
 .../container-executor/impl/container-executor.c| 12 
 2 files changed, 24 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/972da46c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index fbc794c..f7fe83d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -26,6 +26,22 @@ include(HadoopCommon)
 string(REGEX MATCH . HCD_ONE "${HADOOP_CONF_DIR}")
 string(COMPARE EQUAL ${HCD_ONE} / HADOOP_CONF_DIR_IS_ABS)
 
+if (CMAKE_VERSION VERSION_LESS "3.1")
+  # subset of CMAKE__COMPILER_ID
+  # https://cmake.org/cmake/help/v3.0/variable/CMAKE_LANG_COMPILER_ID.html
+  if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR
+  CMAKE_C_COMPILER_ID STREQUAL "Clang" OR
+  CMAKE_C_COMPILER_ID STREQUAL "AppleClang")
+set (CMAKE_C_FLAGS "-std=c99 -Wall -pedantic-errors ${CMAKE_C_FLAGS}")
+  elseif (CMAKE_C_COMPILER_ID STREQUAL "Intel")
+set (CMAKE_C_FLAGS "-std=c99 -Wall ${CMAKE_C_FLAGS}")
+  elseif (CMAKE_C_COMPILER_ID STREQUAL "SunPro")
+set (CMAKE_C_FLAGS "-xc99 ${CMAKE_C_FLAGS}")
+  endif ()
+else ()
+  set (CMAKE_C_STANDARD 99)
+endif ()
+
 # Note: can't use -D_FILE_OFFSET_BITS=64, see MAPREDUCE-4258
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
 string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS}")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/972da46c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 25f01ea..9be8cf4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -722,14 +722,18 @@ static int create_container_directories(const char* user, 
const char *app_id,
  * Load the user information for a given user name.
  */
 static struct passwd* get_user_info(const char* user) {
-  int string_size = sysconf(_SC_GETPW_R_SIZE_MAX);
+  size_t string_size = sysconf(_SC_GETPW_R_SIZE_MAX);
   struct passwd *result = NULL;
   if(string_size < 1024) {
 string_size = 1024;
   }
-  void* buffer = malloc(string_size + sizeof(struct passwd));
-  if (getpwnam_r(user, buffer, buffer + sizeof(struct passwd), string_size,
-&result) != 0) {
+  struct passwd* buffer = malloc(sizeof(struct passwd) + string_size);
+  if (NULL == buffer) {
+fprintf(LOGFILE, "Failed malloc in get_user_info");
+return NULL;
+  }
+  if (getpwnam_r(user, buffer, ((char*)buffer) + sizeof(struct passwd),
+string_size, &result) != 0) {
 free(buffer);
 fprintf(LOGFILE, "Can't get user information %s - %s\n", user,
strerror(errno));


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[38/50] [abbrv] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-01-27 Thread vvasudev
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09f27e5b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
new file mode 100644
index 000..63fbc9d
--- /dev/null
+++ b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
@@ -0,0 +1,283 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
+
+The binary distribution of this product bundles binaries of
+org.iq80.leveldb:leveldb-api (https://github.com/dain/leveldb), which has the
+following notices:
+* Copyright 2011 Dain Sundstrom 
+* Copyright 2011 FuseSource Corp. http://fusesource.com
+
+The binary distribution of this product bundles binaries of
+org.fusesource.hawtjni:hawtjni-runtime (https://github.com/fusesource/hawtjni),
+which has the following notices:
+* This product includes software developed by FuseSource Corp.
+  http://fusesource.com
+* This product includes software developed at
+  Progress Software Corporation and/or its  subsidiaries or affiliates.
+* This product includes software developed by IBM Corporation and others.
+
+The binary distribution of this product bundles binaries of
+AWS Java SDK 1.10.6,
+which has the following notices:
+ * This software includes third party software subject to the following
+ copyrights: - XML parsing and utility functions from JetS3t - Copyright
+ 2006-2009 James Murty. - JSON parsing and utility functions from JSON.org -
+ Copyright 2002 JSON.org. - PKCS#1 PEM encoded private key parsing and utility
+ functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc.
+
+The binary distribution of this product bundles binaries of
+Gson 2.2.4,
+which has the following notices:
+
+The Netty Project
+=
+
+Please visit the Netty web site for more information:
+
+  * http://netty.io/
+
+Copyright 2014 The Netty Project
+
+The Netty Project licenses this file to you under the Apache License,
+version 2.0 (the "License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at:
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+Also, please refer to each LICENSE..txt file, which is located in
+the 'license' directory of the distribution file, for the license terms of the
+components that this product depends on.
+
+---
+This product contains the extensions to Java Collections Framework which has
+been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+  * LICENSE:
+* license/LICENSE.jsr166y.txt (Public Domain)
+  * HOMEPAGE:
+* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+* 
http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+This product contains a modified version of Robert Harder's Public Domain
+Base64 Encoder and Decoder, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.base64.txt (Public Domain)
+  * HOMEPAGE:
+* http://iharder.sourceforge.net/current/java/base64/
+
+This product contains a modified portion of 'Webbit', an event based
+WebSocket and HTTP server, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.webbit.txt (BSD License)
+  * HOMEPAGE:
+* https://github.com/joewalnes/webbit
+
+This product contains a modified portion of 'SLF4J', a simple logging
+facade for Java, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.slf4j.txt (MIT License)
+  * HOMEPAGE:
+* http://www.slf4j.org/
+
+This product contains a modified portion of 'ArrayDeque', written by Josh
+Bloch of Google, Inc:
+
+  * LICENSE:
+* license/LICENSE.deque.txt (Public Domain)
+
+This product contains a modified portion of 'Apache Harmony', an open source
+Java SE, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.harmony.txt (Apache License 2.0)
+  * HOMEPAGE:
+* http://archive.apache.org/dist/harmony/
+
+This product contains a modified version of Roland Kuhn's ASL2
+AbstractNodeQueue, which is based on Dmitriy Vyukov's non-intrusive MPSC queue.
+It can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.abstractnodequeue.txt (Public Domain)
+  * HOMEPAGE:
+* 
https://github.com/akka/akka/blob/wip-2.2.3-for-scala-2.11/akka-actor/src/main/java/akka/dispatch/AbstractNodeQueue.java
+
+This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+and decompression library written by Matthew J.

[14/50] [abbrv] hadoop git commit: YARN-4882. Change the log level to DEBUG for recovering completed applications (templedf via rkanter)

2017-01-27 Thread vvasudev
YARN-4882. Change the log level to DEBUG for recovering completed applications 
(templedf via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f216276d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f216276d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f216276d

Branch: refs/heads/YARN-3926
Commit: f216276d2164c6564632c571fd3adbb03bc8b3e4
Parents: 9ca54f4
Author: Robert Kanter 
Authored: Wed Dec 28 15:21:52 2016 -0800
Committer: Robert Kanter 
Committed: Wed Dec 28 15:21:52 2016 -0800

--
 .../server/resourcemanager/RMAppManager.java| 13 ++--
 .../server/resourcemanager/rmapp/RMAppImpl.java | 31 +++-
 .../rmapp/attempt/RMAppAttemptImpl.java | 27 +
 3 files changed, 57 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f216276d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 4d628ee..bc21952 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -488,8 +488,17 @@ public class RMAppManager implements 
EventHandler<RMAppManagerEvent>,
 Map<ApplicationId, ApplicationStateData> appStates =
 state.getApplicationState();
 LOG.info("Recovering " + appStates.size() + " applications");
-for (ApplicationStateData appState : appStates.values()) {
-  recoverApplication(appState, state);
+
+int count = 0;
+
+try {
+  for (ApplicationStateData appState : appStates.values()) {
+recoverApplication(appState, state);
+count += 1;
+  }
+} finally {
+  LOG.info("Successfully recovered " + count  + " out of "
+  + appStates.size() + " applications");
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f216276d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index a647969..4db595e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -128,6 +128,10 @@ public class RMAppImpl implements RMApp, Recoverable {
  private static final EnumSet<RMAppState> COMPLETED_APP_STATES =
   EnumSet.of(RMAppState.FINISHED, RMAppState.FINISHING, RMAppState.FAILED,
   RMAppState.KILLED, RMAppState.FINAL_SAVING, RMAppState.KILLING);
+  private static final String STATE_CHANGE_MESSAGE =
+  "%s State change from %s to %s on event = %s";
+  private static final String RECOVERY_MESSAGE =
+  "Recovering app: %s with %d attempts and final state = %s";
 
   // Immutable fields
   private final ApplicationId applicationId;
@@ -905,9 +909,16 @@ public class RMAppImpl implements RMApp, Recoverable {
 /* TODO fail the application on the failed transition */
   }
 
-  if (oldState != getState()) {
-LOG.info(appID + " State change from " + oldState + " to "
-+ getState() + " on event=" + event.getType());
+  // Log at INFO if we're not recovering or not in a terminal state.
+  // Log at DEBUG otherwise.
+  if ((oldState != getState()) &&
+  (((recoveredFinalState == null)) ||
+(event.getType() != RMAppEventType.RECOVER))) {
+LOG.info(String.format(STATE_CHANGE_MESSAGE, appID, oldState,
+getState(), event.getType()));
+  } else if ((oldState != getState()) && LOG.isDe

[22/50] [abbrv] hadoop git commit: YARN-5931. Document timeout interfaces CLI and REST APIs (Contributed by Rohith Sharma K S via Daniel Templeton)

2017-01-27 Thread vvasudev
YARN-5931. Document timeout interfaces CLI and REST APIs (Contributed by Rohith 
Sharma K S via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/165d01a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/165d01a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/165d01a7

Branch: refs/heads/YARN-3926
Commit: 165d01a73e5f328108a9e876c7a751a3c2cec4a8
Parents: c6a5b68
Author: Daniel Templeton 
Authored: Fri Dec 30 08:41:52 2016 -0800
Committer: Daniel Templeton 
Committed: Fri Dec 30 08:41:52 2016 -0800

--
 .../yarn/api/records/ApplicationTimeout.java|  12 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   2 +-
 .../src/main/resources/yarn-default.xml |   2 +-
 .../rmapp/monitor/RMAppLifetimeMonitor.java |   4 +-
 .../src/site/markdown/ResourceManagerRest.md| 250 +++
 .../src/site/markdown/YarnCommands.md   |   1 +
 6 files changed, 266 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/165d01a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeout.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeout.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeout.java
index 4beb11e..444a270 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeout.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeout.java
@@ -28,9 +28,19 @@ import org.apache.hadoop.yarn.util.Records;
  * 
  * {@link ApplicationTimeoutType} of the timeout type.
  * Expiry time in ISO8601 standard with format
- * -MM-dd'T'HH:mm:ss.SSSZ.
+ * -MM-dd'T'HH:mm:ss.SSSZ or "UNLIMITED".
  * Remaining time in seconds.
  * 
+ * The possible values for {ExpiryTime, RemainingTimeInSeconds} are
+ * 
+ * {UNLIMITED,-1} : Timeout is not configured for given timeout type
+ * (LIFETIME).
+ * {ISO8601 date string, 0} : Timeout is configured and application has
+ * completed.
+ * {ISO8601 date string, greater than zero} : Timeout is configured and
+ * application is RUNNING. Application will be timed out after configured
+ * value.
+ * 
  */
 @Public
 @Unstable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/165d01a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bf0a0ab..d0ade22 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1521,7 +1521,7 @@ public class YarnConfiguration extends Configuration {
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
 
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
-  60000;
+  3000;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/165d01a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 9d73308..3c30ed3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2996,7 +2996,7 @@
 The RMAppLifetimeMonitor Service uses this value as monitor interval
 
 yarn.resourcemanager.application-timeouts.monitor.interval-ms
-6
+3000
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/165d01a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-ya

[09/50] [abbrv] hadoop git commit: YARN-5756. Add state-machine implementation for scheduler queues. (Xuan Gong via wangda)

2017-01-27 Thread vvasudev
YARN-5756. Add state-machine implementation for scheduler queues. (Xuan Gong 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0840b432
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0840b432
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0840b432

Branch: refs/heads/YARN-3926
Commit: 0840b4329b2428b20b862f70d72cbdcd6d1618ed
Parents: 0ddb8de
Author: Wangda Tan 
Authored: Tue Dec 27 21:18:24 2016 -0800
Committer: Wangda Tan 
Committed: Tue Dec 27 21:18:33 2016 -0800

--
 .../hadoop/yarn/api/records/QueueState.java |  11 +-
 .../src/main/proto/yarn_protos.proto|   1 +
 .../scheduler/QueueStateManager.java| 100 
 .../scheduler/SchedulerQueue.java   |  69 
 .../scheduler/SchedulerQueueManager.java|   3 +-
 .../scheduler/capacity/AbstractCSQueue.java | 119 +++---
 .../scheduler/capacity/CSQueue.java |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |   5 +
 .../capacity/CapacitySchedulerContext.java  |   2 +
 .../capacity/CapacitySchedulerQueueManager.java |  13 ++
 .../scheduler/capacity/LeafQueue.java   |  19 ++-
 .../scheduler/capacity/ParentQueue.java |  27 +++-
 .../scheduler/capacity/TestLeafQueue.java   |  11 ++
 .../scheduler/capacity/TestQueueState.java  | 104 
 .../capacity/TestQueueStateManager.java | 162 +++
 15 files changed, 621 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0840b432/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
index 2bc0407..86fd8b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
@@ -29,6 +29,10 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
  * 
  *   {@link #RUNNING} - normal state.
  *   {@link #STOPPED} - not accepting new application submissions.
+ *   
+ * {@link #DRAINING} - not accepting new application submissions
+ * and waiting for applications finish.
+ *   
  * 
  * 
  * @see QueueInfo
@@ -41,7 +45,12 @@ public enum QueueState {
* Stopped - Not accepting submissions of new applications.
*/
   STOPPED,
-  
+
+  /**
+   * Draining - Not accepting submissions of new applications,
+   * and waiting for applications finish.
+   */
+  DRAINING,
   /**
* Running - normal operation.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0840b432/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 5a70298..a8ba740 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -427,6 +427,7 @@ message YarnClusterMetricsProto {
 enum QueueStateProto {
   Q_STOPPED = 1;
   Q_RUNNING = 2;
+  Q_DRAINING = 3;
 }
 
 message QueueStatisticsProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0840b432/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueStateManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueStateManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueStateManager.java
new file mode 100644
index 000..761817e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueStateManager.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this 

[19/50] [abbrv] hadoop git commit: HDFS-11275. Check groupEntryIndex and throw a helpful exception on failures when removing ACL.

2017-01-27 Thread vvasudev
HDFS-11275. Check groupEntryIndex and throw a helpful exception on failures 
when removing ACL.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7699575
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7699575
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7699575

Branch: refs/heads/YARN-3926
Commit: e76995755629579c7f83fbb417afde20ea747980
Parents: a4f6665
Author: Xiao Chen 
Authored: Thu Dec 29 10:36:50 2016 -0800
Committer: Xiao Chen 
Committed: Thu Dec 29 10:36:50 2016 -0800

--
 .../org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7699575/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 25ca09b..efededd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -221,7 +222,10 @@ class FSDirAclOp {
   int groupEntryIndex = Collections.binarySearch(
   featureEntries, groupEntryKey,
   AclTransformation.ACL_ENTRY_COMPARATOR);
-  assert groupEntryIndex >= 0;
+  Preconditions.checkPositionIndex(groupEntryIndex, featureEntries.size(),
+  "Invalid group entry index after binary-searching inode: " +
+  inode.getFullPathName() + "(" + inode.getId() + ") "
+  + "with featureEntries:" + featureEntries);
   FsAction groupPerm = featureEntries.get(groupEntryIndex).getPermission();
   FsPermission newPerm = new FsPermission(perm.getUserAction(), groupPerm,
   perm.getOtherAction(), perm.getStickyBit());


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[13/50] [abbrv] hadoop git commit: YARN-5906. Update AppSchedulingInfo to use SchedulingPlacementSet. Contributed by Wangda Tan.

2017-01-27 Thread vvasudev
YARN-5906. Update AppSchedulingInfo to use SchedulingPlacementSet. Contributed 
by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ca54f48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ca54f48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ca54f48

Branch: refs/heads/YARN-3926
Commit: 9ca54f4810de182195263bd594afb56dab564105
Parents: 972da46
Author: Sunil G 
Authored: Wed Dec 28 22:48:19 2016 +0530
Committer: Sunil G 
Committed: Wed Dec 28 22:48:19 2016 +0530

--
 .../scheduler/AppSchedulingInfo.java| 437 +--
 .../LocalitySchedulingPlacementSet.java | 311 +
 .../placement/SchedulingPlacementSet.java   |  22 +-
 .../TestApplicationLimitsByPartition.java   |   6 +
 4 files changed, 451 insertions(+), 325 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ca54f48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 30f7ef9..0551df1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -18,22 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.commons.collections.IteratorUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -48,16 +32,30 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.LocalitySchedulingPlacementSet;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.ResourceRequestUpdateResult;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SchedulingPlacementSet;
 
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 /**
  * This class keeps track of all the consumption of an application. This also
  * keeps track of current running/completed containers for the application.
@@ -89,8 +87,8 @@ public class AppSchedulingInfo {
 
   private final ConcurrentSkipListMap
   schedulerKeys = new ConcurrentSkipListMap<>();
-  final Map>
-  resourceRequestMap = new ConcurrentHashMap<>();
+  final Map>
+  schedulerKeyToPlacementSets = new ConcurrentHashMap<>();
   final Map>> containerIncreaseRequestMap =
   new ConcurrentHashMa

[18/50] [abbrv] hadoop git commit: HDFS-11267. Avoid redefinition of storageDirs in NNStorage and cleanup its accessors in Storage. (Manoj Govindassamy via lei)

2017-01-27 Thread vvasudev
HDFS-11267. Avoid redefinition of storageDirs in NNStorage and cleanup its 
accessors in Storage. (Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4f66655
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4f66655
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4f66655

Branch: refs/heads/YARN-3926
Commit: a4f66655ec22ca8c960f971f2b0cdafbd3430ad7
Parents: e9f1396
Author: Lei Xu 
Authored: Thu Dec 29 16:57:40 2016 +0800
Committer: Lei Xu 
Committed: Thu Dec 29 16:57:40 2016 +0800

--
 .../org/apache/hadoop/hdfs/server/common/Storage.java |  8 ++--
 .../hdfs/server/datanode/BlockPoolSliceStorage.java   |  6 +++---
 .../hadoop/hdfs/server/datanode/DataStorage.java  |  8 
 .../apache/hadoop/hdfs/server/namenode/NNStorage.java | 14 ++
 .../hadoop/hdfs/server/common/StorageAdapter.java |  2 +-
 .../server/datanode/TestBlockPoolSliceStorage.java|  2 +-
 6 files changed, 21 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f66655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index c172289..f23a48a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -125,7 +125,7 @@ public abstract class Storage extends StorageInfo {
   }
 
   protected List storageDirs =
-  new CopyOnWriteArrayList();
+  new CopyOnWriteArrayList<>();
   
   private class DirIterator implements Iterator {
 final StorageDirType dirType;
@@ -938,7 +938,11 @@ public abstract class Storage extends StorageInfo {
   public int getNumStorageDirs() {
 return storageDirs.size();
   }
-  
+
+  public List getStorageDirs() {
+return storageDirs;
+  }
+
   public StorageDirectory getStorageDir(int idx) {
 return storageDirs.get(idx);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f66655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index dd82a74..3203de2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -295,7 +295,7 @@ public class BlockPoolSliceStorage extends Storage {
   void remove(File absPathToRemove) {
 Preconditions.checkArgument(absPathToRemove.isAbsolute());
 LOG.info("Removing block level storage: " + absPathToRemove);
-for (Iterator it = this.storageDirs.iterator();
+for (Iterator it = getStorageDirs().iterator();
  it.hasNext(); ) {
   StorageDirectory sd = it.next();
   if (sd.getRoot().getAbsoluteFile().equals(absPathToRemove)) {
@@ -788,7 +788,7 @@ public class BlockPoolSliceStorage extends Storage {
*/
   public void clearTrash() {
 final List trashRoots = new ArrayList<>();
-for (StorageDirectory sd : storageDirs) {
+for (StorageDirectory sd : getStorageDirs()) {
   File trashRoot = getTrashRootDir(sd);
   if (trashRoot.exists() && sd.getPreviousDir().exists()) {
 LOG.error("Trash and PreviousDir shouldn't both exist for storage "
@@ -826,7 +826,7 @@ public class BlockPoolSliceStorage extends Storage {
   /** trash is enabled if at least one storage directory contains trash root */
   @VisibleForTesting
   public boolean trashEnabled() {
-for (StorageDirectory sd : storageDirs) {
+for (StorageDirectory sd : getStorageDirs()) {
   if (getTrashRootDir(sd).exists()) {
 return true;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f66655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 12d932

[34/50] [abbrv] hadoop git commit: YARN-5923. Unable to access logs for a running application if YARN_ACL_ENABLE is enabled. Contributed by Xuan Gong.

2017-01-27 Thread vvasudev
YARN-5923. Unable to access logs for a running application if YARN_ACL_ENABLE 
is enabled. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fadd690
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fadd690
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fadd690

Branch: refs/heads/YARN-3926
Commit: 8fadd69047143c9c389cc09ca24100b5f90f79d2
Parents: f69a107
Author: Junping Du 
Authored: Tue Jan 3 15:03:38 2017 -0800
Committer: Junping Du 
Committed: Tue Jan 3 15:03:38 2017 -0800

--
 .../server/nodemanager/webapp/WebServer.java| 35 +---
 1 file changed, 31 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fadd690/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
index bb444db..53e529b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java
@@ -22,8 +22,11 @@ import static 
org.apache.hadoop.yarn.util.StringHelper.pajoin;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -37,6 +40,8 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import java.util.ArrayList;
+import java.util.List;
 
 public class WebServer extends AbstractService {
 
@@ -57,10 +62,11 @@ public class WebServer extends AbstractService {
 
   @Override
   protected void serviceStart() throws Exception {
-String bindAddress = WebAppUtils.getWebAppBindURL(getConfig(),
+Configuration conf = getConfig();
+String bindAddress = WebAppUtils.getWebAppBindURL(conf,
   YarnConfiguration.NM_BIND_HOST,
-  
WebAppUtils.getNMWebAppURLWithoutScheme(getConfig()));
-boolean enableCors = getConfig()
+  WebAppUtils.getNMWebAppURLWithoutScheme(conf));
+boolean enableCors = conf
 .getBoolean(YarnConfiguration.NM_WEBAPP_ENABLE_CORS_FILTER,
 YarnConfiguration.DEFAULT_NM_WEBAPP_ENABLE_CORS_FILTER);
 if (enableCors) {
@@ -68,13 +74,34 @@ public class WebServer extends AbstractService {
   + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
 }
 
+// Always load pseudo authentication filter to parse "user.name" in an URL
+// to identify a HTTP request's user.
+boolean hasHadoopAuthFilterInitializer = false;
+String filterInitializerConfKey = "hadoop.http.filter.initializers";
+Class[] initializersClasses =
+conf.getClasses(filterInitializerConfKey);
+List targets = new ArrayList();
+if (initializersClasses != null) {
+  for (Class initializer : initializersClasses) {
+if (initializer.getName().equals(
+AuthenticationFilterInitializer.class.getName())) {
+  hasHadoopAuthFilterInitializer = true;
+  break;
+}
+targets.add(initializer.getName());
+  }
+}
+if (!hasHadoopAuthFilterInitializer) {
+  targets.add(AuthenticationFilterInitializer.class.getName());
+  conf.set(filterInitializerConfKey, StringUtils.join(",", targets));
+}
 LOG.info("Instantiating NMWebApp at " + bindAddress);
 try {
   this.webApp =
   WebApps
 .$for("node", Context.class, this.nmContext, "ws")
 .at(bindAddress)
-.with(getConfig())
+.with(conf)
 .withHttpSpnegoPrincipalKey(
   YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY)
 .withHttp

[11/50] [abbrv] hadoop git commit: YARN-6024. Capacity Scheduler 'continuous reservation looking' doesn't work when sum of queue's used and reserved resources is equal to max. Contributed by Wangda Ta

2017-01-27 Thread vvasudev
YARN-6024. Capacity Scheduler 'continuous reservation looking' doesn't work 
when sum of queue's used and reserved resources is equal to max. Contributed by 
Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6715b26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6715b26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6715b26

Branch: refs/heads/YARN-3926
Commit: f6715b26b65fc127b35368d0460b786ece88b5b8
Parents: e297be7
Author: Sunil G 
Authored: Wed Dec 28 12:33:02 2016 +0530
Committer: Sunil G 
Committed: Wed Dec 28 12:33:02 2016 +0530

--
 .../capacity/TestContainerAllocation.java   | 50 
 1 file changed, 50 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6715b26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index 555e0fd..1ab29dd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -641,4 +641,54 @@ public class TestContainerAllocation {
 
 rm1.close();
   }
+
+  @Test(timeout = 6)
+  public void testContinuousReservationLookingWhenUsedEqualsMax() throws 
Exception {
+CapacitySchedulerConfiguration newConf =
+(CapacitySchedulerConfiguration) TestUtils
+.getConfigurationWithMultipleQueues(conf);
+// Set maximum capacity of A to 10
+newConf.setMaximumCapacity(CapacitySchedulerConfiguration.ROOT + ".a", 10);
+MockRM rm1 = new MockRM(newConf);
+
+rm1.getRMContext().setNodeLabelManager(mgr);
+rm1.start();
+MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB);
+MockNM nm2 = rm1.registerNode("h2:1234", 90 * GB);
+
+// launch an app to queue A, AM container should be launched in nm1
+RMApp app1 = rm1.submitApp(2 * GB, "app", "user", null, "a");
+MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
+
+// launch 2nd app to queue B, AM container should be launched in nm1
+// Now usage of nm1 is 3G (2G + 1G)
+RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b");
+MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
+
+am1.allocate("*", 4 * GB, 2, null);
+
+CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
+RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
+
+// Do node heartbeats twice, we expect one container allocated on nm1 and
+// one container reserved on nm1.
+cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
+cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
+
+FiCaSchedulerApp schedulerApp1 =
+cs.getApplicationAttempt(am1.getApplicationAttemptId());
+
+// App1 will get 2 container allocated (plus AM container)
+Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
+Assert.assertEquals(1, schedulerApp1.getReservedContainers().size());
+
+// Do node heartbeats on nm2, we expect one container allocated on nm2 and
+// one unreserved on nm1
+cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
+Assert.assertEquals(3, schedulerApp1.getLiveContainers().size());
+Assert.assertEquals(0, schedulerApp1.getReservedContainers().size());
+
+rm1.close();
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] [abbrv] hadoop git commit: HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao.

2017-01-27 Thread vvasudev
HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng 
Shao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b811a1c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b811a1c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b811a1c1

Branch: refs/heads/YARN-3926
Commit: b811a1c14d00ab236158ab75fad1fe41364045a4
Parents: 165d01a
Author: Haohui Mai 
Authored: Fri Dec 30 22:17:49 2016 -0800
Committer: Haohui Mai 
Committed: Fri Dec 30 22:17:49 2016 -0800

--
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java| 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b811a1c1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 26cfc01..c89913e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -659,7 +659,9 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
-  conn.disconnect();
+  // Don't call conn.disconnect() to allow connection reuse
+  // See http://tinyurl.com/java7-http-keepalive
+  conn.getInputStream().close();
 }
   }
   try {
@@ -891,7 +893,9 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-conn.disconnect();
+// Don't call conn.disconnect() to allow connection reuse
+// See http://tinyurl.com/java7-http-keepalive
+conn.getInputStream().close();
   }
 }
 
@@ -938,6 +942,9 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
+  // This is a connection to DataNode.  Let's disconnect since
+  // there is little chance that the connection will be reused
+  // any time soonl
   conn.disconnect();
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] [abbrv] hadoop git commit: HDFS-11252. TestFileTruncate#testTruncateWithDataNodesRestartImmediately can fail with BindException. Contributed by Yiqun Lin.

2017-01-27 Thread vvasudev
HDFS-11252. TestFileTruncate#testTruncateWithDataNodesRestartImmediately can 
fail with BindException. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ddb8def
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ddb8def
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ddb8def

Branch: refs/heads/YARN-3926
Commit: 0ddb8defad6a7fd5eb69847d1789ba51952c0cf0
Parents: 9262797
Author: Brahma Reddy Battula 
Authored: Wed Dec 28 10:34:33 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Dec 28 10:34:33 2016 +0530

--
 .../hdfs/server/namenode/TestFileTruncate.java  | 31 ++--
 1 file changed, 15 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ddb8def/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index d203413..0ea587c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -681,13 +681,7 @@ public class TestFileTruncate {
 int toTruncateLength = 1;
 int newLength = startingFileSize - toTruncateLength;
 cluster.getDataNodes().get(dn).shutdown();
-try {
-  boolean isReady = fs.truncate(p, newLength);
-  assertFalse(isReady);
-} finally {
-  cluster.restartDataNode(dn, true, true);
-  cluster.waitActive();
-}
+truncateAndRestartDN(p, dn, newLength);
 checkBlockRecovery(p);
 
 LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
@@ -740,13 +734,7 @@ public class TestFileTruncate {
 int toTruncateLength = 1;
 int newLength = startingFileSize - toTruncateLength;
 cluster.getDataNodes().get(dn).shutdown();
-try {
-  boolean isReady = fs.truncate(p, newLength);
-  assertFalse(isReady);
-} finally {
-  cluster.restartDataNode(dn, true, true);
-  cluster.waitActive();
-}
+truncateAndRestartDN(p, dn, newLength);
 checkBlockRecovery(p);
 
 LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
@@ -800,8 +788,8 @@ public class TestFileTruncate {
 boolean isReady = fs.truncate(p, newLength);
 assertFalse(isReady);
 
-cluster.restartDataNode(dn0, true, true);
-cluster.restartDataNode(dn1, true, true);
+cluster.restartDataNode(dn0, false, true);
+cluster.restartDataNode(dn1, false, true);
 cluster.waitActive();
 checkBlockRecovery(p);
 
@@ -1242,4 +1230,15 @@ public class TestFileTruncate {
 .build();
 fs = cluster.getFileSystem();
   }
+
+  private void truncateAndRestartDN(Path p, int dn, int newLength)
+  throws IOException {
+try {
+  boolean isReady = fs.truncate(p, newLength);
+  assertFalse(isReady);
+} finally {
+  cluster.restartDataNode(dn, false, true);
+  cluster.waitActive();
+}
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: Revert "HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed by Zheng Shao."

2017-01-27 Thread vvasudev
Revert "HDFS-11280. Allow WebHDFS to reuse HTTP connections to NN. Contributed 
by Zheng Shao."

This reverts commit b811a1c14d00ab236158ab75fad1fe41364045a4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b31e1951
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b31e1951
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b31e1951

Branch: refs/heads/YARN-3926
Commit: b31e1951e044b2c6f6e88a007a8c175941ddd674
Parents: 6938b67
Author: Brahma Reddy Battula 
Authored: Tue Jan 3 19:19:29 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Jan 3 19:19:29 2017 +0530

--
 .../org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java| 11 ++-
 1 file changed, 2 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b31e1951/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index c89913e..26cfc01 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -659,9 +659,7 @@ public class WebHdfsFileSystem extends FileSystem
   url = new URL(conn.getHeaderField("Location"));
   redirectHost = url.getHost() + ":" + url.getPort();
 } finally {
-  // Don't call conn.disconnect() to allow connection reuse
-  // See http://tinyurl.com/java7-http-keepalive
-  conn.getInputStream().close();
+  conn.disconnect();
 }
   }
   try {
@@ -893,9 +891,7 @@ public class WebHdfsFileSystem extends FileSystem
 LOG.debug("Response decoding failure.", e);
 throw ioe;
   } finally {
-// Don't call conn.disconnect() to allow connection reuse
-// See http://tinyurl.com/java7-http-keepalive
-conn.getInputStream().close();
+conn.disconnect();
   }
 }
 
@@ -942,9 +938,6 @@ public class WebHdfsFileSystem extends FileSystem
 try {
   validateResponse(op, conn, true);
 } finally {
-  // This is a connection to DataNode.  Let's disconnect since
-  // there is little chance that the connection will be reused
-  // any time soonl
   conn.disconnect();
 }
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: HADOOP-13942. Build failure due to errors of javadoc build in hadoop-azure. Contributed by Kai Sasaki

2017-01-27 Thread vvasudev
HADOOP-13942. Build failure due to errors of javadoc build in hadoop-azure. 
Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6a5b689
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6a5b689
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6a5b689

Branch: refs/heads/YARN-3926
Commit: c6a5b689db573046819f0e9193cb042e6c7298f4
Parents: 95c2c24
Author: Mingliang Liu 
Authored: Thu Dec 29 16:23:23 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Dec 29 16:24:06 2016 -0800

--
 .../org/apache/hadoop/fs/azure/KeyProvider.java |  3 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 31 +---
 .../hadoop/fs/azure/NativeFileSystemStore.java  |  5 ++--
 .../fs/azure/SASKeyGeneratorInterface.java  |  6 ++--
 .../hadoop/fs/azure/SelfRenewingLease.java  |  2 +-
 .../hadoop/fs/azure/SendRequestIntercept.java   |  3 +-
 .../hadoop/fs/azure/StorageInterface.java   | 28 +++---
 .../metrics/AzureFileSystemInstrumentation.java |  2 ++
 8 files changed, 51 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a5b689/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
index 4c3a369..ed510f6 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/KeyProvider.java
@@ -36,7 +36,8 @@ public interface KeyProvider {
* @param conf
*  Hadoop configuration parameters
* @return the plaintext storage account key
-   * @throws KeyProviderException
+   * @throws KeyProviderException Thrown if there is a problem instantiating a
+   * KeyProvider or retrieving a key using a KeyProvider object.
*/
   String getStorageAccountKey(String accountName, Configuration conf)
   throws KeyProviderException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a5b689/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 307c2fa..b742e53 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -268,7 +268,8 @@ public class NativeAzureFileSystem extends FileSystem {
  *"innerFile2"
  *  ]
  * } }
- * @throws IOException
+ * @param fs file system on which a file is written.
+ * @throws IOException Thrown when fail to write file.
  */
 public void writeFile(FileSystem fs) throws IOException {
   Path path = getRenamePendingFilePath();
@@ -292,6 +293,8 @@ public class NativeAzureFileSystem extends FileSystem {
 /**
  * Return the contents of the JSON file to represent the operations
  * to be performed for a folder rename.
+ *
+ * @return JSON string which represents the operation.
  */
 public String makeRenamePendingFileContents() {
   SimpleDateFormat sdf = new SimpleDateFormat("-MM-dd HH:mm:ss.SSS");
@@ -418,7 +421,7 @@ public class NativeAzureFileSystem extends FileSystem {
  * when everything is working normally. See redo() for the alternate
  * execution path for the case where we're recovering from a folder rename
  * failure.
- * @throws IOException
+ * @throws IOException Thrown when fail to renaming.
  */
 public void execute() throws IOException {
 
@@ -472,7 +475,8 @@ public class NativeAzureFileSystem extends FileSystem {
 }
 
 /** Clean up after execution of rename.
- * @throws IOException */
+ * @throws IOException Thrown when fail to clean up.
+ * */
 public void cleanup() throws IOException {
 
   if (fs.getStoreInterface().isAtomicRenameKey(srcKey)) {
@@ -496,7 +500,7 @@ public class NativeAzureFileSystem extends FileSystem {
  * Recover from a folder rename failure by redoing the intended work,
  * as recorded in the -RenamePending.json file.
  * 
- * @throws IOException
+ * @throws IOException Thrown when fail to redo.
  */
 public void redo() throws IOException {
 
@@ -1120,6 +1124,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   /**
* 

[28/50] [abbrv] hadoop git commit: HADOOP-13946. Document how HDFS updates timestamps in the FS spec; compare with object stores. Contributed by Steve Loughran

2017-01-27 Thread vvasudev
HADOOP-13946. Document how HDFS updates timestamps in the FS spec; compare with 
object stores. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/451efb08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/451efb08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/451efb08

Branch: refs/heads/YARN-3926
Commit: 451efb08fe0680d002c6856c104ebb366acee8a0
Parents: 591fb15
Author: Mingliang Liu 
Authored: Tue Jan 3 13:08:38 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Jan 3 13:08:38 2017 -0800

--
 .../site/markdown/filesystem/introduction.md| 33 
 1 file changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/451efb08/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
index f6db557..2e6f19b 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
@@ -392,3 +392,36 @@ Object stores with these characteristics, can not be used 
as a direct replacemen
 for HDFS. In terms of this specification, their implementations of the
 specified operations do not match those required. They are considered supported
 by the Hadoop development community, but not to the same extent as HDFS.
+
+#### Timestamps
+
+The HDFS filesystem does not update the modification time while it is being 
written to.
+
+Specifically
+
+* `FileSystem.create()` creation: a zero-byte file is listed; creation and 
modification time is
+  set to the current time as seen on the NameNode.
+* Writes to a file via the output stream returned in the `create()` call: the 
modification
+  time *does not change*.
+* When `OutputStream.close()` is called, all remaining data is written, the 
file closed and
+  the NameNode updated with the final size of the file. The modification time 
is set to
+  the time the file was closed.
+* Opening a file for appends via an `append()` operation does not change the 
modification
+  time of the file until the `close()` call is made on the output stream.
+* `FileSystem.setTimes()` can be used to explicitly set the time on a file.
+* The rarely used operations:  `FileSystem.concat()`, `createSnapshot()`, 
`createSymlink()` and
+  `truncate()` all update the modification time.
+
+Other filesystems may have different behaviors.
+
+Object stores have a significantly simpler view of time:
+
+ * The file only becomes visible at the end of the write operation; this also 
sets
+   the creation time of the file.
+ * The timestamp is likely to be in UTC or the TZ of the object store. If the
+   client is in a different timezone, the timestamp may be ahead or behind that
+   of the client.
+ * A file's modification time is always the same as its creation time.
+ * The `FileSystem.setTimes()` operation to set file timestamps will generally 
be ignored.
+ * If `FileSystem.append()` is supported, the changes and modification time
+ are likely to only become visible after the output stream is closed.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/50] [abbrv] hadoop git commit: HDFS-11278. Add missing @Test annotation for TestSafeMode.testSafeModeUtils() (Contributed by Lukas Majercak via Daniel Templeton)

2017-01-27 Thread vvasudev
HDFS-11278. Add missing @Test annotation for TestSafeMode.testSafeModeUtils() 
(Contributed by Lukas Majercak via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce3613c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce3613c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce3613c9

Branch: refs/heads/YARN-3926
Commit: ce3613c9656e57efdc73edd1787bd9622d1b716e
Parents: f216276
Author: Daniel Templeton 
Authored: Wed Dec 28 20:09:00 2016 -0800
Committer: Daniel Templeton 
Committed: Wed Dec 28 20:09:00 2016 -0800

--
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce3613c9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 4940d45..ada61b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -482,6 +482,7 @@ public class TestSafeMode {
* Tests some utility methods that surround the SafeMode's state.
* @throws IOException when there's an issue connecting to the test DFS.
*/
+  @Test
   public void testSafeModeUtils() throws IOException {
 dfs = cluster.getFileSystem();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] [abbrv] hadoop git commit: HADOOP-13883. Addendum patch to correct the message and alphabetized with the earlier patch. Contributed by Yiqun Lin.

2017-01-27 Thread vvasudev
HADOOP-13883. Addendum patch to correct the message and alphabetized  with the 
earlier patch. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6938b677
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6938b677
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6938b677

Branch: refs/heads/YARN-3926
Commit: 6938b677111a5fa6bf654717b310220f98b7cf20
Parents: b811a1c
Author: Brahma Reddy Battula 
Authored: Sat Dec 31 16:16:02 2016 +0530
Committer: Brahma Reddy Battula 
Committed: Sat Dec 31 16:16:02 2016 +0530

--
 .../org/apache/hadoop/util/GenericOptionsParser.java  | 14 --
 .../hadoop-common/src/site/markdown/CommandsManual.md |  2 +-
 .../hadoop/mapred/pipes/TestPipeApplication.java  |  4 +++-
 3 files changed, 12 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6938b677/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index eed910c..cd1fc83 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -212,10 +212,11 @@ public class GenericOptionsParser {
*/
   @SuppressWarnings("static-access")
   private static synchronized Options buildGeneralOptions(Options opts) {
-Option fs = OptionBuilder.withArgName("local|namenode:port")
-.hasArg()
-.withDescription("specify a namenode")
-.create("fs");
+Option fs = OptionBuilder.withArgName("file:///|hdfs://namenode:port")
+.hasArg()
+.withDescription("specify default filesystem URL to use, "
++ "overrides 'fs.defaultFS' property from configurations.")
+.create("fs");
 Option jt = OptionBuilder.withArgName("local|resourcemanager:port")
 .hasArg()
 .withDescription("specify a ResourceManager")
@@ -569,8 +570,9 @@ public class GenericOptionsParser {
 + "specify an application configuration file");
 out.println("-D"
 + "define a value for a given property");
-out.println("-fs  "
-+ "specify a namenode");
+out.println("-fs  "
++ "specify default filesystem URL to use, overrides "
++ "'fs.defaultFS' property from configurations.");
 out.println("-jt   "
 + "specify a ResourceManager");
 out.println("-files "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6938b677/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 696848b..27a858a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -54,11 +54,11 @@ Many subcommands honor a common set of configuration 
options to alter their beha
 
 | GENERIC\_OPTION | Description |
 |: |: |
-| `-fs  or ` | Specify a NameNode. |
 | `-archives  ` | Specify comma separated 
archives to be unarchived on the compute machines. Applies only to job. |
 | `-conf  ` | Specify an application configuration file. |
 | `-D = ` | Use value for given property. |
 | `-files  ` | Specify comma separated files to 
be copied to the map reduce cluster. Applies only to job. |
+| `-fs  or ` | Specify default filesystem URL 
to use. Overrides 'fs.defaultFS' property from configurations. |
 | `-jt  or ` | Specify a ResourceManager. Applies 
only to job. |
 | `-libjars  ` | Specify comma separated jar 
files to include in the classpath. Applies only to job. |
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6938b677/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
index c8b2f3a..88d8f95 100644
--- 
a/hadoop-mapreduce-project/hado

[17/50] [abbrv] hadoop git commit: HDFS-11251. ConcurrentModificationException during DataNode#refreshVolumes. (Manoj Govindassamy via lei)

2017-01-27 Thread vvasudev
HDFS-11251. ConcurrentModificationException during DataNode#refreshVolumes. 
(Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9f13968
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9f13968
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9f13968

Branch: refs/heads/YARN-3926
Commit: e9f1396834174646a8d7aa8fc6c4a4f724ca5b28
Parents: 603f3ef
Author: Lei Xu 
Authored: Thu Dec 29 15:10:36 2016 +0800
Committer: Lei Xu 
Committed: Thu Dec 29 15:11:25 2016 +0800

--
 .../hadoop/hdfs/server/common/Storage.java  |   6 +-
 .../server/datanode/BlockPoolSliceStorage.java  |   2 +-
 .../hdfs/server/datanode/DataStorage.java   |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java| 154 +--
 4 files changed, 150 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f13968/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 1f03fc2..c172289 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -32,6 +32,7 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
@@ -122,8 +123,9 @@ public abstract class Storage extends StorageInfo {
 public StorageDirType getStorageDirType();
 public boolean isOfType(StorageDirType type);
   }
-  
-  protected List storageDirs = new 
ArrayList();
+
+  protected List storageDirs =
+  new CopyOnWriteArrayList();
   
   private class DirIterator implements Iterator {
 final StorageDirType dirType;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f13968/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 9bd221e..dd82a74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -299,7 +299,7 @@ public class BlockPoolSliceStorage extends Storage {
  it.hasNext(); ) {
   StorageDirectory sd = it.next();
   if (sd.getRoot().getAbsoluteFile().equals(absPathToRemove)) {
-it.remove();
+this.storageDirs.remove(sd);
 break;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f13968/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 5163e6b..12d9322 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -508,7 +508,7 @@ public class DataStorage extends Storage {
   bpsStorage.remove(bpRoot.getAbsoluteFile());
 }
 
-it.remove();
+this.storageDirs.remove(sd);
 try {
   sd.unlock();
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f13968/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 96d1a28..0401a81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanod

[40/50] [abbrv] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-01-27 Thread vvasudev
YARN-4081. Add support for multiple resource types in the Resource class. 
(Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09f27e5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09f27e5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09f27e5b

Branch: refs/heads/YARN-3926
Commit: 09f27e5bb54251bb812314012397693dd8ee4e74
Parents: e49e0a6
Author: Wangda Tan 
Authored: Thu Sep 10 09:43:26 2015 -0700
Committer: Varun Vasudev 
Committed: Fri Jan 27 23:11:51 2017 +0530

--
 .../src/main/resources/META-INF/LICENSE.txt | 1661 ++
 .../src/main/resources/META-INF/NOTICE.txt  |  283 +++
 .../yarn/api/protocolrecords/ResourceTypes.java |   27 +
 .../hadoop/yarn/api/records/Resource.java   |  204 ++-
 .../yarn/api/records/ResourceInformation.java   |  218 +++
 .../exceptions/ResourceNotFoundException.java   |   45 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |  197 +++
 .../src/main/proto/yarn_protos.proto|   12 +
 .../yarn/conf/TestResourceInformation.java  |   70 +
 .../yarn/util/TestUnitsConversionUtil.java  |  120 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java|   13 +
 .../api/records/impl/pb/ResourcePBImpl.java |  197 ++-
 .../hadoop/yarn/util/resource/Resources.java|  136 +-
 .../hadoop/yarn/api/TestPBImplRecords.java  |3 +
 14 files changed, 3097 insertions(+), 89 deletions(-)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: MAPREDUCE-5155. Race condition in test case TestFetchFailure cause it to fail (Contributed by Haibo Chen via Daniel Templeton)

2017-01-27 Thread vvasudev
MAPREDUCE-5155. Race condition in test case TestFetchFailure cause it to fail 
(Contributed by Haibo Chen via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95c2c24c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95c2c24c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95c2c24c

Branch: refs/heads/YARN-3926
Commit: 95c2c24cd0007ece9d7cd0fbe4c5932828e639d4
Parents: e769957
Author: Daniel Templeton 
Authored: Thu Dec 29 10:49:29 2016 -0800
Committer: Daniel Templeton 
Committed: Thu Dec 29 10:50:47 2016 -0800

--
 .../hadoop/mapreduce/v2/app/TestFetchFailure.java  | 17 +++--
 1 file changed, 15 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95c2c24c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
index 8d25079..cb2a29e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.Counters;
@@ -45,6 +46,7 @@ import 
org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEv
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.junit.Assert;
 import org.junit.Test;
@@ -79,8 +81,19 @@ public class TestFetchFailure {
 
 // wait for map success
 app.waitForState(mapTask, TaskState.SUCCEEDED);
-
-TaskAttemptCompletionEvent[] events = 
+
+final int checkIntervalMillis = 10;
+final int waitForMillis = 800;
+GenericTestUtils.waitFor(new Supplier() {
+  @Override
+  public Boolean get() {
+TaskAttemptCompletionEvent[] events = job
+.getTaskAttemptCompletionEvents(0, 100);
+return events.length >= 1;
+  }
+}, checkIntervalMillis, waitForMillis);
+
+TaskAttemptCompletionEvent[] events =
   job.getTaskAttemptCompletionEvents(0, 100);
 Assert.assertEquals("Num completion events not correct",
 1, events.length);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: HADOOP-13932. Fix indefinite article in comments (Contributed by LiXin Ge via Daniel Templeton)

2017-01-27 Thread vvasudev
HADOOP-13932. Fix indefinite article in comments (Contributed by LiXin Ge via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e216e8e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e216e8e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e216e8e2

Branch: refs/heads/YARN-3926
Commit: e216e8e2334519b7c833d99586218e99a39265f3
Parents: ac1e5d4
Author: Daniel Templeton 
Authored: Tue Dec 27 11:12:05 2016 -0800
Committer: Daniel Templeton 
Committed: Tue Dec 27 13:03:16 2016 -0800

--
 .../namenode/FSImagePreTransactionalStorageInspector.java| 2 +-
 .../hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md  | 8 
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/LibHdfs.md | 2 +-
 .../dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml   | 2 +-
 .../dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml   | 2 +-
 .../src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java| 4 ++--
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java  | 2 +-
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml   | 8 
 .../resourcemanager/recovery/FileSystemRMStateStore.java | 2 +-
 .../resourcemanager/scheduler/fair/FSQueueMetrics.java   | 2 +-
 .../java/org/apache/hadoop/yarn/server/MiniYARNCluster.java  | 2 +-
 11 files changed, 18 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e216e8e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
index 3cd5ae1..6469385 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
@@ -41,7 +41,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.io.IOUtils;
 
 /**
- * Inspects a FSImage storage directory in the "old" (pre-HDFS-1073) format.
+ * Inspects an FSImage storage directory in the "old" (pre-HDFS-1073) format.
  * This format has the following data files:
  *   - fsimage
  *   - fsimage.ckpt (when checkpoint is being uploaded)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e216e8e2/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md
index f9b76b8..889d7a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md
@@ -65,7 +65,7 @@ Note that rolling upgrade is supported only from Hadoop-2.4.0 
onwards.
 
 ### Upgrade without Downtime
 
-In a HA cluster, there are two or more *NameNodes (NNs)*, many *DataNodes 
(DNs)*,
+In an HA cluster, there are two or more *NameNodes (NNs)*, many *DataNodes 
(DNs)*,
 a few *JournalNodes (JNs)* and a few *ZooKeeperNodes (ZKNs)*.
 *JNs* is relatively stable and does not require upgrade when upgrading HDFS in 
most of the cases.
 In the rolling upgrade procedure described here,
@@ -76,7 +76,7 @@ Upgrading *JNs* and *ZKNs* may incur cluster downtime.
 
 Suppose there are two namenodes *NN1* and *NN2*,
 where *NN1* and *NN2* are respectively in active and standby states.
-The following are the steps for upgrading a HA cluster:
+The following are the steps for upgrading an HA cluster:
 
 1. Prepare Rolling Upgrade
 1. Run "[`hdfs dfsadmin -rollingUpgrade 
prepare`](#dfsadmin_-rollingUpgrade)"
@@ -133,7 +133,7 @@ However, datanodes can still be upgraded in a rolling 
manner.
 
 In a non-HA cluster, there are a *NameNode (NN)*, a *SecondaryNameNode (SNN)*
 and many *DataNodes (DNs)*.
-The procedure for upgrading a non-HA cluster is similar to upgrading a HA 
cluster
+The procedure for upgrading a non-HA cluster is similar to upgrading an HA 
cluster
 except that Step 2 "Upgrade Active and Standby *NNs*" is changed to below:
 
 * Upgrade *NN* and *SNN*
@@ -175,7 +175,7 @@ A newer release is downgradable to the pre-upgrade release
 only if both the namenode layout version and the datanode layout version
 are not changed between these two releases.
 
-In a HA cluster,
+In an HA clust

[37/50] [abbrv] hadoop git commit: YARN-5988. RM unable to start in secure setup. Contributed by Ajith S.

2017-01-27 Thread vvasudev
YARN-5988. RM unable to start in secure setup. Contributed by Ajith S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e49e0a6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e49e0a6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e49e0a6e

Branch: refs/heads/YARN-3926
Commit: e49e0a6e37f4a32535d7d4a07015fbf9eb33c74a
Parents: 01d31fe
Author: Rohith Sharma K S 
Authored: Wed Jan 4 10:29:31 2017 +0530
Committer: Rohith Sharma K S 
Committed: Wed Jan 4 12:09:29 2017 +0530

--
 .../server/resourcemanager/AdminService.java|  8 +++
 .../resourcemanager/TestRMAdminService.java | 56 +++-
 2 files changed, 50 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e49e0a6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 028b6f0..04ee5d8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -536,6 +536,7 @@ public class AdminService extends CompositeService 
implements
 checkRMStatus(user.getShortUserName(), operation, "refresh Service ACLs.");
 
 refreshServiceAcls();
+refreshActiveServicesAcls();
 RMAuditLogger.logSuccess(user.getShortUserName(), operation,
 "AdminService");
 
@@ -549,6 +550,13 @@ public class AdminService extends CompositeService 
implements
 YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
 
 refreshServiceAcls(conf, policyProvider);
+  }
+
+  private void refreshActiveServicesAcls() throws IOException, YarnException  {
+PolicyProvider policyProvider = RMPolicyProvider.getInstance();
+Configuration conf =
+getConfiguration(new Configuration(false),
+YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
 rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
 rmContext.getApplicationMasterService().refreshServiceAcls(
 conf, policyProvider);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e49e0a6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index fa01b55..9ae28c2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -770,20 +770,7 @@ public class TestRMAdminService {
   YarnException {
 StateChangeRequestInfo requestInfo = new StateChangeRequestInfo(
 HAServiceProtocol.RequestSource.REQUEST_BY_USER);
-configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
-"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
-configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
-configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
-configuration.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
-int base = 100;
-for (String confKey : YarnConfiguration
-.getServiceAddressConfKeys(configuration)) {
-  configuration.set(HAUtil.addSuffix(confKey, "rm1"), "0.0.0.0:"
-  + (base + 20));
-  configuration.set(HAUtil.addSuffix(confKey, "rm2"), "0.0.0.0:"
-  + (base + 40));
-  base = base * 2;
-}
+updateConfigurationForRMHA();
 Configuration conf1 = new Configuration(configuration);
 conf1.set(YarnConfiguration.RM

[02/50] [abbrv] hadoop git commit: YARN-5969. FairShareComparator: Cache value of getResourceUsage for better performance. (zhangshilong via kasha)

2017-01-27 Thread vvasudev
YARN-5969. FairShareComparator: Cache value of getResourceUsage for better 
performance. (zhangshilong via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3973e70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3973e70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3973e70

Branch: refs/heads/YARN-3926
Commit: c3973e7080bf71b57ace4a6adf4bb43f3c5d35b5
Parents: c0e0ef2
Author: Karthik Kambatla 
Authored: Tue Dec 27 12:30:35 2016 -0800
Committer: Karthik Kambatla 
Committed: Tue Dec 27 12:30:35 2016 -0800

--
 .../scheduler/fair/policies/FairSharePolicy.java | 19 +++
 1 file changed, 11 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3973e70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index f120f0f..d47ea07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -79,29 +79,32 @@ public class FairSharePolicy extends SchedulingPolicy {
   double minShareRatio1, minShareRatio2;
   double useToWeightRatio1, useToWeightRatio2;
   double weight1, weight2;
+  //Do not repeat the getResourceUsage calculation
+  Resource resourceUsage1 = s1.getResourceUsage();
+  Resource resourceUsage2 = s2.getResourceUsage();
   Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
   s1.getMinShare(), s1.getDemand());
   Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
   s2.getMinShare(), s2.getDemand());
   boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
-  s1.getResourceUsage(), minShare1);
+  resourceUsage1, minShare1);
   boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
-  s2.getResourceUsage(), minShare2);
-  minShareRatio1 = (double) s1.getResourceUsage().getMemorySize()
+  resourceUsage2, minShare2);
+  minShareRatio1 = (double) resourceUsage1.getMemorySize()
   / Resources.max(RESOURCE_CALCULATOR, null, minShare1, 
ONE).getMemorySize();
-  minShareRatio2 = (double) s2.getResourceUsage().getMemorySize()
+  minShareRatio2 = (double) resourceUsage2.getMemorySize()
   / Resources.max(RESOURCE_CALCULATOR, null, minShare2, 
ONE).getMemorySize();
 
   weight1 = s1.getWeights().getWeight(ResourceType.MEMORY);
   weight2 = s2.getWeights().getWeight(ResourceType.MEMORY);
   if (weight1 > 0.0 && weight2 > 0.0) {
-useToWeightRatio1 = s1.getResourceUsage().getMemorySize() / weight1;
-useToWeightRatio2 = s2.getResourceUsage().getMemorySize() / weight2;
+useToWeightRatio1 = resourceUsage1.getMemorySize() / weight1;
+useToWeightRatio2 = resourceUsage2.getMemorySize() / weight2;
   } else { // Either weight1 or weight2 equals to 0
 if (weight1 == weight2) {
   // If they have same weight, just compare usage
-  useToWeightRatio1 = s1.getResourceUsage().getMemorySize();
-  useToWeightRatio2 = s2.getResourceUsage().getMemorySize();
+  useToWeightRatio1 = resourceUsage1.getMemorySize();
+  useToWeightRatio2 = resourceUsage2.getMemorySize();
 } else {
   // By setting useToWeightRatios to negative weights, we give the
   // zero-weight one less priority, so the non-zero weight one will


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/50] [abbrv] hadoop git commit: YARN-5962. Spelling errors in logging and exceptions for resource manager code (gsohn via rkanter)

2017-01-27 Thread vvasudev
YARN-5962. Spelling errors in logging and exceptions for resource manager code 
(gsohn via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bbd0232
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bbd0232
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bbd0232

Branch: refs/heads/YARN-3926
Commit: 1bbd023275db535ab80fcb60e022151e9679d468
Parents: e216e8e
Author: Robert Kanter 
Authored: Tue Dec 27 13:35:34 2016 -0800
Committer: Robert Kanter 
Committed: Tue Dec 27 13:35:34 2016 -0800

--
 .../server/resourcemanager/ClientRMService.java |  8 
 .../recovery/MemoryRMStateStore.java|  2 +-
 .../resourcemanager/reservation/InMemoryPlan.java   |  4 ++--
 .../reservation/ReservationInputValidator.java  |  2 +-
 .../server/resourcemanager/rmnode/RMNodeImpl.java   |  2 +-
 .../resourcemanager/scheduler/SchedulerUtils.java   |  2 +-
 .../allocator/RegularContainerAllocator.java|  2 +-
 .../scheduler/fifo/FifoScheduler.java   |  2 +-
 .../webapp/CapacitySchedulerPage.java   |  2 +-
 .../resourcemanager/TestContainerResourceUsage.java |  8 
 .../reservation/TestReservationInputValidator.java  | 16 
 11 files changed, 25 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bbd0232/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 3dc7e38..cdf30a1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -410,7 +410,7 @@ public class ClientRMService extends AbstractService 
implements
   response = 
GetApplicationAttemptReportResponse.newInstance(attemptReport);
 }else{
   throw new YarnException("User " + callerUGI.getShortUserName()
-  + " does not have privilage to see this attempt " + appAttemptId);
+  + " does not have privilege to see this attempt " + appAttemptId);
 }
 return response;
   }
@@ -450,7 +450,7 @@ public class ClientRMService extends AbstractService 
implements
   response = GetApplicationAttemptsResponse.newInstance(listAttempts);
 } else {
   throw new YarnException("User " + callerUGI.getShortUserName()
-  + " does not have privilage to see this application " + appId);
+  + " does not have privilege to see this application " + appId);
 }
 return response;
   }
@@ -501,7 +501,7 @@ public class ClientRMService extends AbstractService 
implements
   .createContainerReport());
 } else {
   throw new YarnException("User " + callerUGI.getShortUserName()
-  + " does not have privilage to see this application " + appId);
+  + " does not have privilege to see this application " + appId);
 }
 return response;
   }
@@ -554,7 +554,7 @@ public class ClientRMService extends AbstractService 
implements
   response = GetContainersResponse.newInstance(listContainers);
 } else {
   throw new YarnException("User " + callerUGI.getShortUserName()
-  + " does not have privilage to see this application " + appId);
+  + " does not have privilege to see this application " + appId);
 }
 return response;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bbd0232/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java
index 8b4ace3..5f3328b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resour

[35/50] [abbrv] hadoop git commit: HDFS-11279. Cleanup unused DataNode#checkDiskErrorAsync(). Contributed by Hanisha Koneru

2017-01-27 Thread vvasudev
HDFS-11279. Cleanup unused DataNode#checkDiskErrorAsync(). Contributed by 
Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87bb1c49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87bb1c49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87bb1c49

Branch: refs/heads/YARN-3926
Commit: 87bb1c49bb25f75b040028b1cebe3bc5251836d1
Parents: 8fadd69
Author: Xiaoyu Yao 
Authored: Tue Jan 3 18:25:46 2017 -0800
Committer: Xiaoyu Yao 
Committed: Tue Jan 3 18:25:46 2017 -0800

--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 19 --
 .../datanode/checker/DatasetVolumeChecker.java  | 69 
 .../hdfs/server/datanode/DataNodeTestUtils.java | 15 +
 .../datanode/TestDataNodeHotSwapVolumes.java| 18 +
 .../datanode/TestDataNodeVolumeFailure.java | 17 ++---
 .../checker/TestDatasetVolumeChecker.java   | 47 -
 6 files changed, 26 insertions(+), 159 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bb1c49/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e893c5e..28d627a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2051,25 +2051,6 @@ public class DataNode extends ReconfigurableBase
* Check if there is a disk failure asynchronously
* and if so, handle the error.
*/
-  @VisibleForTesting
-  public void checkDiskErrorAsync() {
-volumeChecker.checkAllVolumesAsync(
-data, (healthyVolumes, failedVolumes) -> {
-if (failedVolumes.size() > 0) {
-  LOG.warn("checkDiskErrorAsync callback got {} failed volumes: {}",
-  failedVolumes.size(), failedVolumes);
-} else {
-  LOG.debug("checkDiskErrorAsync: no volume failures detected");
-}
-lastDiskErrorCheck = Time.monotonicNow();
-handleVolumeFailures(failedVolumes);
-  });
-  }
-
-  /**
-   * Check if there is a disk failure asynchronously
-   * and if so, handle the error.
-   */
   public void checkDiskErrorAsync(FsVolumeSpi volume) {
 volumeChecker.checkVolume(
 volume, (healthyVolumes, failedVolumes) -> {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bb1c49/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index cab6122..9ad47f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -233,68 +233,6 @@ public class DatasetVolumeChecker {
   }
 
   /**
-   * Start checks against all volumes of a dataset, invoking the
-   * given callback when the operation has completed. The function
-   * does not wait for the checks to complete.
-   *
-   * If a volume cannot be referenced then it is already closed and
-   * cannot be checked. No error is propagated to the callback for that
-   * volume.
-   *
-   * @param dataset - FsDatasetSpi to be checked.
-   * @param callback - Callback to be invoked when the checks are complete.
-   * @return true if the check was scheduled and the callback will be invoked.
-   * false if the check was not scheduled and the callback will not be
-   * invoked.
-   */
-  public boolean checkAllVolumesAsync(
-  final FsDatasetSpi dataset,
-  Callback callback) {
-final long gap = timer.monotonicNow() - lastAllVolumesCheck;
-if (gap < minDiskCheckGapMs) {
-  numSkippedChecks.incrementAndGet();
-  LOG.trace(
-  "Skipped checking all volumes, time since last check {} is less " +
-  "than the minimum gap between checks ({} ms).",
-  gap, minDiskCheckGapMs);
-  return false;
-}
-
-final FsDatasetSpi.FsVolumeReferences references =
-dataset.getFsVolumeReferences();
-
-if (references.size() == 0) {
-  LOG.warn("checkAllVolumesAsync - no volumes can be re

  1   2   >