hadoop git commit: Setting branch version to 2.9.0 before RC

2017-11-02 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.0 [created] 571cca929


Setting branch version to 2.9.0 before RC


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/571cca92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/571cca92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/571cca92

Branch: refs/heads/branch-2.9.0
Commit: 571cca9295f9a12e770eea6709a7b24ce6b9a374
Parents: 6f5a95f
Author: Arun Suresh 
Authored: Thu Nov 2 23:41:48 2017 -0700
Committer: Arun Suresh 
Committed: Thu Nov 2 23:41:48 2017 -0700

--
 hadoop-assemblies/pom.xml   |  4 +-
 hadoop-build-tools/pom.xml  |  2 +-
 hadoop-client/pom.xml   |  4 +-
 .../hadoop-annotations/pom.xml  |  4 +-
 .../hadoop-auth-examples/pom.xml|  4 +-
 hadoop-common-project/hadoop-auth/pom.xml   |  4 +-
 hadoop-common-project/hadoop-common/pom.xml |  4 +-
 hadoop-common-project/hadoop-kms/pom.xml|  4 +-
 hadoop-common-project/hadoop-minikdc/pom.xml|  4 +-
 hadoop-common-project/hadoop-nfs/pom.xml|  4 +-
 hadoop-common-project/pom.xml   |  4 +-
 hadoop-dist/pom.xml |  4 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |  4 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |  4 +-
 .../hadoop-hdfs-native-client/pom.xml   |  4 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml |  4 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  4 +-
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   |  4 +-
 hadoop-hdfs-project/pom.xml |  4 +-
 .../hadoop-mapreduce-client-app/pom.xml |  4 +-
 .../hadoop-mapreduce-client-common/pom.xml  |  4 +-
 .../hadoop-mapreduce-client-core/pom.xml|  4 +-
 .../hadoop-mapreduce-client-hs-plugins/pom.xml  |  4 +-
 .../hadoop-mapreduce-client-hs/pom.xml  |  4 +-
 .../hadoop-mapreduce-client-jobclient/pom.xml   |  4 +-
 .../hadoop-mapreduce-client-shuffle/pom.xml |  4 +-
 .../hadoop-mapreduce-client/pom.xml |  4 +-
 .../hadoop-mapreduce-examples/pom.xml   |  4 +-
 hadoop-mapreduce-project/pom.xml|  4 +-
 hadoop-maven-plugins/pom.xml|  2 +-
 hadoop-minicluster/pom.xml  |  4 +-
 hadoop-project-dist/pom.xml |  4 +-
 hadoop-project/pom.xml  |  4 +-
 hadoop-project/src/site/markdown/index.md.vm| 46 +++-
 hadoop-tools/hadoop-ant/pom.xml |  4 +-
 hadoop-tools/hadoop-archive-logs/pom.xml|  4 +-
 hadoop-tools/hadoop-archives/pom.xml|  4 +-
 hadoop-tools/hadoop-aws/pom.xml |  4 +-
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  2 +-
 hadoop-tools/hadoop-azure/pom.xml   |  2 +-
 hadoop-tools/hadoop-datajoin/pom.xml|  4 +-
 hadoop-tools/hadoop-distcp/pom.xml  |  4 +-
 hadoop-tools/hadoop-extras/pom.xml  |  4 +-
 hadoop-tools/hadoop-gridmix/pom.xml |  4 +-
 hadoop-tools/hadoop-openstack/pom.xml   |  4 +-
 hadoop-tools/hadoop-pipes/pom.xml   |  4 +-
 hadoop-tools/hadoop-resourceestimator/pom.xml   |  4 +-
 hadoop-tools/hadoop-rumen/pom.xml   |  4 +-
 hadoop-tools/hadoop-sls/pom.xml |  4 +-
 hadoop-tools/hadoop-streaming/pom.xml   |  4 +-
 hadoop-tools/hadoop-tools-dist/pom.xml  |  4 +-
 hadoop-tools/pom.xml|  4 +-
 .../hadoop-yarn/hadoop-yarn-api/pom.xml |  4 +-
 .../pom.xml |  4 +-
 .../pom.xml |  4 +-
 .../hadoop-yarn-applications/pom.xml|  4 +-
 .../hadoop-yarn/hadoop-yarn-client/pom.xml  |  4 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |  4 +-
 .../hadoop-yarn/hadoop-yarn-registry/pom.xml|  4 +-
 .../pom.xml |  4 +-
 .../hadoop-yarn-server-common/pom.xml   |  4 +-
 .../hadoop-yarn-server-nodemanager/pom.xml  |  4 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |  4 +-
 .../hadoop-yarn-server-router/pom.xml   |  4 +-
 .../pom.xml |  4 +-
 .../hadoop-yarn-server-tests/pom.xml|  4 +-
 .../pom.xml |  4 +-
 .../pom.xml |  4 +-
 .../pom.xml |  2 +-
 .../hadoop-yarn-server-timelineservice/pom.xml  |  4 +-
 .../hadoop-yarn-server-web-proxy/pom.xml|  4 +-
 .../hadoop-yarn/hadoop-yarn-server/pom.xml  |  4 +-
 .../hadoop-yarn/hadoop-yarn-site/pom.xml|  4 +-
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml  |  4 +-
 hadoop-yarn-project/hadoop-yarn/pom.xml |  4 +-
 hadoop-yarn-proje

hadoop git commit: YARN-7392. Render cluster information on new YARN web ui. Contributed by Vasudevan Skm.

2017-11-02 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk b00f828d8 -> c41728486


YARN-7392. Render cluster information on new YARN web ui. Contributed by 
Vasudevan Skm.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4172848
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4172848
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4172848

Branch: refs/heads/trunk
Commit: c41728486bcf79c678e803c74ec81ff159a6b6ea
Parents: b00f828
Author: Sunil G 
Authored: Fri Nov 3 11:45:50 2017 +0530
Committer: Sunil G 
Committed: Fri Nov 3 11:45:50 2017 +0530

--
 .../main/webapp/app/helpers/date-formatter.js   | 29 
 .../src/main/webapp/app/helpers/lower.js| 27 +++
 .../src/main/webapp/app/initializers/jquery.js  | 35 +++
 .../src/main/webapp/app/models/cluster-info.js  |  7 ++--
 .../src/main/webapp/app/routes/application.js   | 13 +--
 .../src/main/webapp/app/styles/app.css  | 36 
 .../main/webapp/app/templates/application.hbs   | 13 +--
 .../tests/unit/helpers/date-formatter-test.js   | 28 +++
 .../webapp/tests/unit/helpers/lower-test.js | 28 +++
 .../hadoop-yarn-ui/src/main/webapp/yarn.lock| 12 +++
 10 files changed, 222 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4172848/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/date-formatter.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/date-formatter.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/date-formatter.js
new file mode 100644
index 000..17834e4
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/date-formatter.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+import Converter from 'yarn-ui/utils/converter';
+
+export function dateFormatter(params) {
+  const [timestamp, dateOnly] = params;
+
+  return dateOnly ? Converter.timeStampToDateOnly(timestamp) : 
Converter.timeStampToDate(timestamp);
+}
+
+export default Ember.Helper.helper(dateFormatter);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4172848/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/lower.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/lower.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/lower.js
new file mode 100644
index 000..e519905
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/lower.js
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import Ember from 'ember';
+
+export function lower(params) {
+  const string = params[0];
+  return string.toLowerCase();
+}
+
+export default Ember.Helper.helper(lower);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4172848/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/jquery.js
---

hadoop git commit: Preparing for 2.9.1 development

2017-11-02 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 6f5a95ff9 -> bb6a1aa5f


Preparing for 2.9.1 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb6a1aa5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb6a1aa5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb6a1aa5

Branch: refs/heads/branch-2.9
Commit: bb6a1aa5fac18941ecd15502e0dec23997f81e44
Parents: 6f5a95f
Author: Arun Suresh 
Authored: Thu Nov 2 22:49:38 2017 -0700
Committer: Arun Suresh 
Committed: Thu Nov 2 22:49:38 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client/pom.xml| 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml| 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-ant/pom.xml  | 4 ++--
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml   | 4 ++--
 hadoop-tools/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml  | 4 ++--
 .../hadoop-yarn-applications-distributedshell/pom.xml| 4 ++--
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/p

hadoop git commit: HDFS-12682. ECAdmin -listPolicies will always show SystemErasureCodingPolicies state as DISABLED.

2017-11-02 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1700adc6f -> e565b5277


HDFS-12682. ECAdmin -listPolicies will always show SystemErasureCodingPolicies 
state as DISABLED.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e565b527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e565b527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e565b527

Branch: refs/heads/trunk
Commit: e565b5277d5b890dad107fe85e295a3907e4bfc1
Parents: 1700adc
Author: Xiao Chen 
Authored: Thu Nov 2 21:26:45 2017 -0700
Committer: Xiao Chen 
Committed: Thu Nov 2 21:27:35 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|   4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   2 +-
 .../hdfs/protocol/ErasureCodingPolicy.java  |  62 ++---
 .../hdfs/protocol/ErasureCodingPolicyInfo.java  | 106 
 .../hdfs/protocol/ErasureCodingPolicyState.java |   4 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  13 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  57 +++--
 .../protocol/TestErasureCodingPolicyInfo.java   |  72 +++
 ...tNamenodeProtocolServerSideTranslatorPB.java |   8 +-
 .../federation/router/RouterRpcServer.java  |   4 +-
 .../namenode/ErasureCodingPolicyManager.java| 126 ---
 .../server/namenode/FSDirErasureCodingOp.java   |   7 +-
 .../server/namenode/FSImageFormatProtobuf.java  |  10 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   7 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   4 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |   7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  16 +++
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  22 ++--
 .../server/namenode/TestFSEditLogLoader.java|  12 +-
 .../hdfs/server/namenode/TestFSImage.java   |  17 +--
 .../java/org/apache/hadoop/fs/TestDFSIO.java|  16 +--
 23 files changed, 421 insertions(+), 162 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e565b527/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8d51a9c..467d6be 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -119,6 +119,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -2793,7 +2794,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+  public ErasureCodingPolicyInfo[] getErasureCodingPolicies()
+  throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("getErasureCodingPolicies")) {
   return namenode.getErasureCodingPolicies();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e565b527/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 2a6bd21..9db12e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apach

hadoop git commit: HDFS-12725. BlockPlacementPolicyRackFaultTolerant fails with very uneven racks.

2017-11-02 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6fc09beac -> b00f828d8


HDFS-12725. BlockPlacementPolicyRackFaultTolerant fails with very uneven racks.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b00f828d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b00f828d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b00f828d

Branch: refs/heads/trunk
Commit: b00f828d84e4e029fd4786ebe827ce704a1b2a04
Parents: 6fc09be
Author: Xiao Chen 
Authored: Thu Nov 2 21:51:14 2017 -0700
Committer: Xiao Chen 
Committed: Thu Nov 2 21:53:13 2017 -0700

--
 .../BlockPlacementPolicyRackFaultTolerant.java  |  70 --
 .../hdfs/TestErasureCodingMultipleRacks.java| 131 ---
 2 files changed, 173 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b00f828d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
index 1eac3ea..95c5c88 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
@@ -62,10 +62,17 @@ public class BlockPlacementPolicyRackFaultTolerant extends 
BlockPlacementPolicyD
* randomly.
* 2. If total replica expected is bigger than numOfRacks, it choose:
*  2a. Fill each rack exactly (maxNodesPerRack-1) replicas.
-   *  2b. For some random racks, place one more replica to each one of them, 
until
-   *  numOfReplicas have been chosen. 
-   * In the end, the difference of the numbers of replicas for each two racks
-   * is no more than 1.
+   *  2b. For some random racks, place one more replica to each one of them,
+   *  until numOfReplicas have been chosen. 
+   * 3. If after step 2, there are still replicas not placed (due to some
+   * racks have fewer datanodes than maxNodesPerRack), the rest of the replicas
+   * is placed evenly on the rest of the racks who have Datanodes that have
+   * not been placed a replica.
+   * 4. If after step 3, there are still replicas not placed. A
+   * {@link NotEnoughReplicasException} is thrown.
+   * 
+   * For normal setups, step 2 would suffice. So in the end, the difference
+   * of the numbers of replicas for each two racks is no more than 1.
* Either way it always prefer local storage.
* @return local node of writer
*/
@@ -132,24 +139,63 @@ public class BlockPlacementPolicyRackFaultTolerant 
extends BlockPlacementPolicyD
   chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 } catch (NotEnoughReplicasException e) {
-  LOG.debug("Only able to place {} of {} (maxNodesPerRack={}) nodes " +
-  "evenly across racks, falling back to uneven placement.",
-  results.size(), numOfReplicas, maxNodesPerRack);
+  LOG.warn("Only able to place {} of total expected {}"
+  + " (maxNodesPerRack={}, numOfReplicas={}) nodes "
+  + "evenly across racks, falling back to evenly place on the "
+  + "remaining racks. This may not guarantee rack-level fault "
+  + "tolerance. Please check if the racks are configured 
properly.",
+  results.size(), totalReplicaExpected, maxNodesPerRack, 
numOfReplicas);
   LOG.debug("Caught exception was:", e);
+  chooseEvenlyFromRemainingRacks(writer, excludedNodes, blocksize,
+  maxNodesPerRack, results, avoidStaleNodes, storageTypes,
+  totalReplicaExpected, e);
+
+}
+
+return writer;
+  }
+
+  /**
+   * Choose as evenly as possible from the racks which have available 
datanodes.
+   */
+  private void chooseEvenlyFromRemainingRacks(Node writer,
+  Set excludedNodes, long blocksize, int maxNodesPerRack,
+  List results, boolean avoidStaleNodes,
+  EnumMap storageTypes, int totalReplicaExpected,
+  NotEnoughReplicasException e) throws NotEnoughReplicasException {
+int numResultsOflastChoose = 0;
+NotEnoughReplicasException lastException = e;
+int bestEffortMaxNodesPerRack = maxNodesPerRack;
+while (results.size() != totalReplicaExpected &&
+numResultsOflastChoose != results.size()) {
   // Exclude the chosen nodes
+  final Set newExcludeNodes =

hadoop git commit: HDFS-12725. BlockPlacementPolicyRackFaultTolerant fails with very uneven racks.

2017-11-02 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3e70fd9af -> 621a50ebb


HDFS-12725. BlockPlacementPolicyRackFaultTolerant fails with very uneven racks.

(cherry picked from commit e1187bad8e9b9abb54a55b5f9ab8373b0f64e6d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/621a50eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/621a50eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/621a50eb

Branch: refs/heads/branch-3.0
Commit: 621a50ebb141cd0b51931fc6f665ff0742505be9
Parents: 3e70fd9
Author: Xiao Chen 
Authored: Thu Nov 2 21:51:14 2017 -0700
Committer: Xiao Chen 
Committed: Thu Nov 2 21:53:20 2017 -0700

--
 .../BlockPlacementPolicyRackFaultTolerant.java  |  70 --
 .../hdfs/TestErasureCodingMultipleRacks.java| 131 ---
 2 files changed, 173 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/621a50eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
index 1eac3ea..95c5c88 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
@@ -62,10 +62,17 @@ public class BlockPlacementPolicyRackFaultTolerant extends 
BlockPlacementPolicyD
* randomly.
* 2. If total replica expected is bigger than numOfRacks, it choose:
*  2a. Fill each rack exactly (maxNodesPerRack-1) replicas.
-   *  2b. For some random racks, place one more replica to each one of them, 
until
-   *  numOfReplicas have been chosen. 
-   * In the end, the difference of the numbers of replicas for each two racks
-   * is no more than 1.
+   *  2b. For some random racks, place one more replica to each one of them,
+   *  until numOfReplicas have been chosen. 
+   * 3. If after step 2, there are still replicas not placed (due to some
+   * racks have fewer datanodes than maxNodesPerRack), the rest of the replicas
+   * is placed evenly on the rest of the racks who have Datanodes that have
+   * not been placed a replica.
+   * 4. If after step 3, there are still replicas not placed. A
+   * {@link NotEnoughReplicasException} is thrown.
+   * 
+   * For normal setups, step 2 would suffice. So in the end, the difference
+   * of the numbers of replicas for each two racks is no more than 1.
* Either way it always prefer local storage.
* @return local node of writer
*/
@@ -132,24 +139,63 @@ public class BlockPlacementPolicyRackFaultTolerant 
extends BlockPlacementPolicyD
   chooseOnce(numOfReplicas, writer, excludedNodes, blocksize,
   maxNodesPerRack, results, avoidStaleNodes, storageTypes);
 } catch (NotEnoughReplicasException e) {
-  LOG.debug("Only able to place {} of {} (maxNodesPerRack={}) nodes " +
-  "evenly across racks, falling back to uneven placement.",
-  results.size(), numOfReplicas, maxNodesPerRack);
+  LOG.warn("Only able to place {} of total expected {}"
+  + " (maxNodesPerRack={}, numOfReplicas={}) nodes "
+  + "evenly across racks, falling back to evenly place on the "
+  + "remaining racks. This may not guarantee rack-level fault "
+  + "tolerance. Please check if the racks are configured 
properly.",
+  results.size(), totalReplicaExpected, maxNodesPerRack, 
numOfReplicas);
   LOG.debug("Caught exception was:", e);
+  chooseEvenlyFromRemainingRacks(writer, excludedNodes, blocksize,
+  maxNodesPerRack, results, avoidStaleNodes, storageTypes,
+  totalReplicaExpected, e);
+
+}
+
+return writer;
+  }
+
+  /**
+   * Choose as evenly as possible from the racks which have available 
datanodes.
+   */
+  private void chooseEvenlyFromRemainingRacks(Node writer,
+  Set excludedNodes, long blocksize, int maxNodesPerRack,
+  List results, boolean avoidStaleNodes,
+  EnumMap storageTypes, int totalReplicaExpected,
+  NotEnoughReplicasException e) throws NotEnoughReplicasException {
+int numResultsOflastChoose = 0;
+NotEnoughReplicasException lastException = e;
+int bestEffortMaxNodesPerRack = maxNodesPerRack;
+while (results.size() != totalReplicaExpected &&
+numResultsOflastChoose != results

hadoop git commit: YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed by Inigo Goiri.

2017-11-02 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1c96e0d69 -> 3e70fd9af


YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed 
by Inigo Goiri.

(cherry picked from commit 6e2259264ad9525eeec2a14055d53236711659d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e70fd9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e70fd9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e70fd9a

Branch: refs/heads/branch-3.0
Commit: 3e70fd9af06b007cadddc98d2806941716de4c86
Parents: 1c96e0d
Author: Inigo Goiri 
Authored: Thu Nov 2 21:29:34 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Nov 2 21:30:20 2017 -0700

--
 .../webapp/FederationInterceptorREST.java   | 26 +---
 1 file changed, 17 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e70fd9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 6e67634..626d794 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -682,10 +682,10 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 CompletionService compSvc =
 new ExecutorCompletionService<>(this.threadpool);
 
+// HttpServletRequest does not work with ExecutorCompletionService.
+// Create a duplicate hsr.
+final HttpServletRequest hsrCopy = clone(hsr);
 for (final SubClusterInfo info : subClustersActive.values()) {
-  // HttpServletRequest does not work with ExecutorCompletionService.
-  // Create a duplicate hsr.
-  final HttpServletRequest hsrCopy = clone(hsr);
   compSvc.submit(new Callable() {
 @Override
 public AppsInfo call() {
@@ -744,24 +744,32 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 if (hsr == null) {
   return null;
 }
+@SuppressWarnings("unchecked")
+final Map parameterMap =
+(Map) hsr.getParameterMap();
+final String pathInfo = hsr.getPathInfo();
+final String user = hsr.getRemoteUser();
+final Principal principal = hsr.getUserPrincipal();
+final String mediaType =
+RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
+hsr, AppsInfo.class);
 return new HttpServletRequestWrapper(hsr) {
 public Map getParameterMap() {
-  return hsr.getParameterMap();
+  return parameterMap;
 }
 public String getPathInfo() {
-  return hsr.getPathInfo();
+  return pathInfo;
 }
 public String getRemoteUser() {
-  return hsr.getRemoteUser();
+  return user;
 }
 public Principal getUserPrincipal() {
-  return hsr.getUserPrincipal();
+  return principal;
 }
 public String getHeader(String value) {
   // we override only Accept
   if (value.equals(HttpHeaders.ACCEPT)) {
-return RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
-hsr, AppsInfo.class);
+return mediaType;
   }
   return null;
 }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-12720. Ozone: Ratis options are not passed from KSM Client protobuf helper correctly. Contributed by Mukul Kumar Singh.

2017-11-02 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 6e74039a1 -> 013c36f3c


HDFS-12720. Ozone: Ratis options are not passed from KSM Client protobuf helper 
correctly. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/013c36f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/013c36f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/013c36f3

Branch: refs/heads/HDFS-7240
Commit: 013c36f3cc2995506ad108017c3e9d20aef1a0d2
Parents: 6e74039
Author: Mukul Kumar Singh 
Authored: Fri Nov 3 09:59:25 2017 +0530
Committer: Mukul Kumar Singh 
Committed: Fri Nov 3 09:59:25 2017 +0530

--
 .../ozone/client/io/ChunkGroupOutputStream.java |  21 +++-
 .../hadoop/ozone/client/rpc/RpcClient.java  |   2 +
 .../hadoop/ozone/ksm/helpers/KsmKeyArgs.java|   8 +-
 ...ceManagerProtocolClientSideTranslatorPB.java |   4 +
 .../org/apache/hadoop/scm/XceiverClient.java|  27 ++--
 .../apache/hadoop/scm/XceiverClientManager.java |   1 +
 .../apache/hadoop/scm/XceiverClientRatis.java   |  22 ++--
 .../common/helpers/BlockContainerInfo.java  |   8 ++
 .../main/java/org/apache/ratis/RatisHelper.java |  17 ++-
 .../scm/container/ContainerStateManager.java|  15 ++-
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  21 ++--
 .../web/storage/DistributedStorageHandler.java  |   2 +
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  29 +
 .../ozone/client/rpc/TestOzoneRpcClient.java| 124 ++-
 .../apache/hadoop/ozone/tools/TestCorona.java   |  34 -
 15 files changed, 278 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/013c36f3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index b63596f..d0975a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.client.io;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Result;
+import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.ReplicationType;
+import org.apache.hadoop.ozone.protocol.proto.OzoneProtos.ReplicationFactor;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
@@ -100,7 +102,8 @@ public class ChunkGroupOutputStream extends OutputStream {
   OpenKeySession handler, XceiverClientManager xceiverClientManager,
   StorageContainerLocationProtocolClientSideTranslatorPB scmClient,
   KeySpaceManagerProtocolClientSideTranslatorPB ksmClient,
-  int chunkSize, String requestId) throws IOException {
+  int chunkSize, String requestId, ReplicationFactor factor,
+  ReplicationType type) throws IOException {
 this.streamEntries = new ArrayList<>();
 this.currentStreamIndex = 0;
 this.byteOffset = 0;
@@ -111,6 +114,8 @@ public class ChunkGroupOutputStream extends OutputStream {
 .setVolumeName(info.getVolumeName())
 .setBucketName(info.getBucketName())
 .setKeyName(info.getKeyName())
+.setType(type)
+.setFactor(factor)
 .setDataSize(info.getDataSize()).build();
 this.openID = handler.getId();
 this.xceiverClientManager = xceiverClientManager;
@@ -292,6 +297,8 @@ public class ChunkGroupOutputStream extends OutputStream {
 private KeySpaceManagerProtocolClientSideTranslatorPB ksmClient;
 private int chunkSize;
 private String requestID;
+private ReplicationType type;
+private ReplicationFactor factor;
 
 public Builder setHandler(OpenKeySession handler) {
   this.openHandler = handler;
@@ -325,9 +332,19 @@ public class ChunkGroupOutputStream extends OutputStream {
   return this;
 }
 
+public Builder setType(ReplicationType type) {
+  this.type = type;
+  return this;
+}
+
+public Builder setFactor(ReplicationFactor replicationFactor) {
+  this.factor = replicationFactor;
+  return this;
+}
+
 public ChunkGroupOutputStream build() throws IOException {
   return new ChunkGroupOutputStream(openHandler, xceiverManager, scmClient,
-  

hadoop git commit: YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed by Inigo Goiri.

2017-11-02 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk e565b5277 -> 6fc09beac


YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed 
by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fc09bea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fc09bea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fc09bea

Branch: refs/heads/trunk
Commit: 6fc09beac497b40928b6b0524d83ecf6e7abf932
Parents: e565b52
Author: Inigo Goiri 
Authored: Thu Nov 2 21:29:34 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Nov 2 21:29:53 2017 -0700

--
 .../webapp/FederationInterceptorREST.java   | 26 +---
 1 file changed, 17 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc09bea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 6e67634..626d794 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -682,10 +682,10 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 CompletionService<AppsInfo> compSvc =
 new ExecutorCompletionService<>(this.threadpool);
 
+// HttpServletRequest does not work with ExecutorCompletionService.
+// Create a duplicate hsr.
+final HttpServletRequest hsrCopy = clone(hsr);
 for (final SubClusterInfo info : subClustersActive.values()) {
-  // HttpServletRequest does not work with ExecutorCompletionService.
-  // Create a duplicate hsr.
-  final HttpServletRequest hsrCopy = clone(hsr);
   compSvc.submit(new Callable<AppsInfo>() {
 @Override
 public AppsInfo call() {
@@ -744,24 +744,32 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 if (hsr == null) {
   return null;
 }
+@SuppressWarnings("unchecked")
+final Map<String, String[]> parameterMap =
+(Map<String, String[]>) hsr.getParameterMap();
+final String pathInfo = hsr.getPathInfo();
+final String user = hsr.getRemoteUser();
+final Principal principal = hsr.getUserPrincipal();
+final String mediaType =
+RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
+hsr, AppsInfo.class);
 return new HttpServletRequestWrapper(hsr) {
 public Map<String, String[]> getParameterMap() {
-  return hsr.getParameterMap();
+  return parameterMap;
 }
 public String getPathInfo() {
-  return hsr.getPathInfo();
+  return pathInfo;
 }
 public String getRemoteUser() {
-  return hsr.getRemoteUser();
+  return user;
 }
 public Principal getUserPrincipal() {
-  return hsr.getUserPrincipal();
+  return principal;
 }
 public String getHeader(String value) {
   // we override only Accept
   if (value.equals(HttpHeaders.ACCEPT)) {
-return RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
-hsr, AppsInfo.class);
+return mediaType;
   }
   return null;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed by Inigo Goiri.

2017-11-02 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a60bb3b36 -> 6f5a95ff9


YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed 
by Inigo Goiri.

(cherry picked from commit 6e2259264ad9525eeec2a14055d53236711659d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f5a95ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f5a95ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f5a95ff

Branch: refs/heads/branch-2.9
Commit: 6f5a95ff98a68fab518cb237d858e38e2c21d3df
Parents: a60bb3b
Author: Inigo Goiri 
Authored: Thu Nov 2 21:29:34 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Nov 2 21:35:45 2017 -0700

--
 .../webapp/FederationInterceptorREST.java   | 26 +---
 1 file changed, 17 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f5a95ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 2860d10..a890ff3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -684,10 +684,10 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 CompletionService compSvc =
 new ExecutorCompletionService<>(this.threadpool);
 
+// HttpServletRequest does not work with ExecutorCompletionService.
+// Create a duplicate hsr.
+final HttpServletRequest hsrCopy = clone(hsr);
 for (final SubClusterInfo info : subClustersActive.values()) {
-  // HttpServletRequest does not work with ExecutorCompletionService.
-  // Create a duplicate hsr.
-  final HttpServletRequest hsrCopy = clone(hsr);
   compSvc.submit(new Callable() {
 @Override
 public AppsInfo call() {
@@ -746,25 +746,33 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 if (hsr == null) {
   return null;
 }
+@SuppressWarnings("unchecked")
+final Map<String, String[]> parameterMap =
+(Map<String, String[]>) hsr.getParameterMap();
+final String pathInfo = hsr.getPathInfo();
+final String user = hsr.getRemoteUser();
+final Principal principal = hsr.getUserPrincipal();
+final String mediaType =
+RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
+hsr, AppsInfo.class);
 return new HttpServletRequestWrapper(hsr) {
 @SuppressWarnings("unchecked")
 public Map<String, String[]> getParameterMap() {
-  return (Map<String, String[]>) hsr.getParameterMap();
+  return parameterMap;
 }
 public String getPathInfo() {
-  return hsr.getPathInfo();
+  return pathInfo;
 }
 public String getRemoteUser() {
-  return hsr.getRemoteUser();
+  return user;
 }
 public Principal getUserPrincipal() {
-  return hsr.getUserPrincipal();
+  return principal;
 }
 public String getHeader(String value) {
   // we override only Accept
   if (value.equals(HttpHeaders.ACCEPT)) {
-return RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
-hsr, AppsInfo.class);
+return mediaType;
   }
   return null;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed by Inigo Goiri.

2017-11-02 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 931987f47 -> 2d8d420f2


YARN-7434. Router getApps REST invocation fails with multiple RMs. Contributed 
by Inigo Goiri.

(cherry picked from commit 6e2259264ad9525eeec2a14055d53236711659d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d8d420f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d8d420f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d8d420f

Branch: refs/heads/branch-2
Commit: 2d8d420f26ef897b19f89139fcd1238283b36de1
Parents: 931987f
Author: Inigo Goiri 
Authored: Thu Nov 2 21:29:34 2017 -0700
Committer: Inigo Goiri 
Committed: Thu Nov 2 21:34:34 2017 -0700

--
 .../webapp/FederationInterceptorREST.java   | 26 +---
 1 file changed, 17 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d8d420f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 2860d10..a890ff3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -684,10 +684,10 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 CompletionService compSvc =
 new ExecutorCompletionService<>(this.threadpool);
 
+// HttpServletRequest does not work with ExecutorCompletionService.
+// Create a duplicate hsr.
+final HttpServletRequest hsrCopy = clone(hsr);
 for (final SubClusterInfo info : subClustersActive.values()) {
-  // HttpServletRequest does not work with ExecutorCompletionService.
-  // Create a duplicate hsr.
-  final HttpServletRequest hsrCopy = clone(hsr);
   compSvc.submit(new Callable() {
 @Override
 public AppsInfo call() {
@@ -746,25 +746,33 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
 if (hsr == null) {
   return null;
 }
+@SuppressWarnings("unchecked")
+final Map<String, String[]> parameterMap =
+(Map<String, String[]>) hsr.getParameterMap();
+final String pathInfo = hsr.getPathInfo();
+final String user = hsr.getRemoteUser();
+final Principal principal = hsr.getUserPrincipal();
+final String mediaType =
+RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
+hsr, AppsInfo.class);
 return new HttpServletRequestWrapper(hsr) {
 @SuppressWarnings("unchecked")
 public Map<String, String[]> getParameterMap() {
-  return (Map<String, String[]>) hsr.getParameterMap();
+  return parameterMap;
 }
 public String getPathInfo() {
-  return hsr.getPathInfo();
+  return pathInfo;
 }
 public String getRemoteUser() {
-  return hsr.getRemoteUser();
+  return user;
 }
 public Principal getUserPrincipal() {
-  return hsr.getUserPrincipal();
+  return principal;
 }
 public String getHeader(String value) {
   // we override only Accept
   if (value.equals(HttpHeaders.ACCEPT)) {
-return RouterWebServiceUtil.getMediaTypeFromHttpServletRequest(
-hsr, AppsInfo.class);
+return mediaType;
   }
   return null;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12682. ECAdmin -listPolicies will always show SystemErasureCodingPolicies state as DISABLED.

2017-11-02 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1b6de3fac -> 1c96e0d69


HDFS-12682. ECAdmin -listPolicies will always show SystemErasureCodingPolicies 
state as DISABLED.

(cherry picked from commit 4f4cea56b25d5acc14dd6d61dea70fd4a0c7a9d9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c96e0d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c96e0d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c96e0d6

Branch: refs/heads/branch-3.0
Commit: 1c96e0d69397bb687e26036784a7b656cdeadc39
Parents: 1b6de3f
Author: Xiao Chen 
Authored: Thu Nov 2 21:26:45 2017 -0700
Committer: Xiao Chen 
Committed: Thu Nov 2 21:27:43 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |   3 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|   4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   2 +-
 .../hdfs/protocol/ErasureCodingPolicy.java  |  62 ++---
 .../hdfs/protocol/ErasureCodingPolicyInfo.java  | 106 
 .../hdfs/protocol/ErasureCodingPolicyState.java |   4 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  13 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  57 +++--
 .../protocol/TestErasureCodingPolicyInfo.java   |  72 +++
 ...tNamenodeProtocolServerSideTranslatorPB.java |   8 +-
 .../federation/router/RouterRpcServer.java  |   4 +-
 .../namenode/ErasureCodingPolicyManager.java| 126 ---
 .../server/namenode/FSDirErasureCodingOp.java   |   7 +-
 .../server/namenode/FSImageFormatProtobuf.java  |  10 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   7 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   4 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |   7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  16 +++
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  22 ++--
 .../server/namenode/TestFSEditLogLoader.java|  12 +-
 .../hdfs/server/namenode/TestFSImage.java   |  17 +--
 .../java/org/apache/hadoop/fs/TestDFSIO.java|  16 +--
 23 files changed, 421 insertions(+), 162 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c96e0d6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8d51a9c..467d6be 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -119,6 +119,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -2793,7 +2794,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+  public ErasureCodingPolicyInfo[] getErasureCodingPolicies()
+  throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("getErasureCodingPolicies")) {
   return namenode.getErasureCodingPolicies();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c96e0d6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index a8a5cfa..a90c95d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org

hadoop git commit: HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.

2017-11-02 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 de86f100d -> 44b0b08b1


HDFS-12310: [SPS]: Provide an option to track the status of in progress 
requests. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44b0b08b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44b0b08b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44b0b08b

Branch: refs/heads/HDFS-10285
Commit: 44b0b08b19c007fc2e2570ca3d1a6a8ebed60347
Parents: de86f10
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 3 08:18:14 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Fri Nov 3 08:18:14 2017 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  22 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  21 
 .../hadoop/hdfs/protocol/HdfsConstants.java |  27 +
 .../ClientNamenodeProtocolTranslatorPB.java |  20 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  33 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  17 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  23 +++-
 .../server/blockmanagement/BlockManager.java|  12 ++
 .../namenode/BlockStorageMovementNeeded.java| 109 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 ++-
 .../server/namenode/StoragePolicySatisfier.java |   8 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  35 +-
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestPersistentStoragePolicySatisfier.java   |   2 +-
 .../namenode/TestStoragePolicySatisfier.java|  67 
 .../hdfs/tools/TestStoragePolicyCommands.java   |  18 +++
 16 files changed, 424 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8ea0407..859e693 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -3115,4 +3116,25 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 checkOpen();
 return new OpenFilesIterator(namenode, tracer);
   }
+
+  /**
+   * Check the storage policy satisfy status of the path for which
+   * {@link DFSClient#satisfyStoragePolicy(String)} is called.
+   *
+   * @return Storage policy satisfy status.
+   * 
+   * PENDING if path is in queue and not processed for satisfying
+   * the policy.
+   * IN_PROGRESS if satisfying the storage policy for path.
+   * SUCCESS if storage policy satisfied for the path.
+   * NOT_AVAILABLE if
+   * {@link DFSClient#satisfyStoragePolicy(String)} not called for
+   * path or SPS work is already finished.
+   * 
+   * @throws IOException
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+return namenode.checkStoragePolicySatisfyPathStatus(path);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44b0b08b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 66989f7..81ab932 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protoco

hadoop git commit: YARN-7410. Cleanup FixedValueResource to avoid dependency to ResourceUtils. Contributed by Wangda Tan.

2017-11-02 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk ad0fff2b4 -> 1700adc6f


YARN-7410. Cleanup FixedValueResource to avoid dependency to ResourceUtils. 
Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1700adc6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1700adc6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1700adc6

Branch: refs/heads/trunk
Commit: 1700adc6f7e0ec9f855f554cd0d470a8f04e9c51
Parents: ad0fff2
Author: Sunil G 
Authored: Fri Nov 3 07:25:29 2017 +0530
Committer: Sunil G 
Committed: Fri Nov 3 07:25:29 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |  2 +-
 .../yarn/util/resource/ResourceUtils.java   |  1 +
 .../hadoop/yarn/util/resource/Resources.java| 59 
 3 files changed, 61 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1700adc6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 6bdde18..be292ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -364,7 +364,7 @@ public abstract class Resource implements 
Comparable {
 }
   }
 
-  private void throwExceptionWhenArrayOutOfBound(int index) {
+  protected void throwExceptionWhenArrayOutOfBound(int index) {
 String exceptionMsg = String.format(
 "Trying to access ResourceInformation for given index=%d. "
 + "Acceptable index range is [0,%d), please check double check "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1700adc6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 8f75909..c9cc27b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -267,6 +267,7 @@ public class ResourceUtils {
 updateKnownResources();
 updateResourceTypeIndex();
 initializedResources = true;
+numKnownResourceTypes = resourceTypes.size();
   }
 
   private static void updateKnownResources() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1700adc6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 3690946..068e7f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -118,6 +118,65 @@ public class Resources {
   throw new RuntimeException(name + " cannot be modified!");
 }
 
+/*
+ *  FixedValueResource cannot be updated when any resource types refresh
+ *  by using approach introduced by YARN-7307 and do operations like
+ *  Resources.compare(resource_x, Resources.none()) will throw exceptions.
+ *
+ *  That's why we do reinitialize resource maps for following methods.
+ */
+
+@Override
+public ResourceInformation getResourceInformation(int index)
+throws ResourceNotFoundException {
+  ResourceInformation ri = null;
+  try {
+ri = super.getResourceInformation(index);
+  } catch (ResourceNotFoundException e) {
+// Retry once to reinitialize resource information.
+initResourceMap();
+try {
+  return super.getResourceInformation(index);
+} catch (ResourceNotFoundException ee) {
+  throwExceptionWhenArrayOutOfBound(index

hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 e44ff3f60 -> a60bb3b36


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).

(cherry picked from commit ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a60bb3b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a60bb3b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a60bb3b3

Branch: refs/heads/branch-2.9
Commit: a60bb3b36f42a44f0e86cadacdf8e5651c324002
Parents: e44ff3f
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:42:38 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a60bb3b3/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36..b3e04fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -174,6 +174,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0200fbac5 -> 931987f47


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).

(cherry picked from commit ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/931987f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/931987f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/931987f4

Branch: refs/heads/branch-2
Commit: 931987f4702f1d5233503d19d0040c95cabb2259
Parents: 0200fba
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:40:26 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/931987f4/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36..b3e04fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -174,6 +174,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 cbd81f305 -> 1b6de3fac


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).

(cherry picked from commit ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b6de3fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b6de3fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b6de3fa

Branch: refs/heads/branch-3.0
Commit: 1b6de3fac5122edd634d34cc5525d8152e842697
Parents: cbd81f3
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:40:03 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6de3fa/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2aa9a5c..45aa868 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -178,6 +178,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. (Daniel Templeton via Subru).

2017-11-02 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53c0fb7ef -> ad0fff2b4


YARN-7432. Fix DominantResourceFairnessPolicy serializable findbugs issues. 
(Daniel Templeton via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad0fff2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad0fff2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad0fff2b

Branch: refs/heads/trunk
Commit: ad0fff2b419e8fe8bb5c9a7e19b79dec16cbd96f
Parents: 53c0fb7
Author: Subru Krishnan 
Authored: Thu Nov 2 17:39:23 2017 -0700
Committer: Subru Krishnan 
Committed: Thu Nov 2 17:39:23 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml| 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad0fff2b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2aa9a5c..45aa868 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -178,6 +178,18 @@
 
 
   
+  
+
+
+  
+  
+
+
+  
+  
+
+
+  
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)

2017-11-02 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 9a61d12c0 -> e44ff3f60


HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)

(cherry picked from commit 53c0fb7efebfac4a79f5cce2dd42cf00411d51e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e44ff3f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e44ff3f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e44ff3f6

Branch: refs/heads/branch-2.9
Commit: e44ff3f60acb8935c154c720897d42c1c1b93dff
Parents: 9a61d12
Author: Arun Suresh 
Authored: Thu Nov 2 17:14:07 2017 -0700
Committer: Arun Suresh 
Committed: Thu Nov 2 17:21:57 2017 -0700

--
 .../service/ResourceEstimatorService.java   | 85 +---
 .../translator/impl/BaseLogParser.java  |  5 +-
 .../translator/impl/LogParserUtil.java  | 11 ++-
 .../service/TestResourceEstimatorService.java   |  2 -
 4 files changed, 53 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e44ff3f6/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 92e..0e0e094 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -65,51 +65,49 @@ import com.google.inject.Singleton;
 @Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
-  private static SkylineStore skylineStore;
-  private static Solver solver;
-  private static LogParser logParser;
-  private static LogParserUtil logParserUtil = new LogParserUtil();
-  private static Configuration config;
-  private static Gson gson;
-  private static Type rleType;
-  private static Type skylineStoreType;
+  private final SkylineStore skylineStore;
+  private final Solver solver;
+  private final LogParser logParser;
+  private final LogParserUtil logParserUtil = new LogParserUtil();
+  private final Configuration config;
+  private final Gson gson;
+  private final Type rleType;
+  private final Type skylineStoreType;
 
   public ResourceEstimatorService() throws ResourceEstimatorException {
-if (skylineStore == null) {
-  try {
-config = new Configuration();
-config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
-skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
-SkylineStore.class);
-logParser = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
-LogParser.class);
-logParser.init(config, skylineStore);
-logParserUtil.setLogParser(logParser);
-solver = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SOLVER_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SOLVER_PROVIDER,
-Solver.class);
-solver.init(config, skylineStore);
-  } catch (Exception ex) {
-LOGGER
-.error("Server initialization failed due to: {}", ex.getMessage());
-throw new ResourceEstimatorException(ex.getMessage(), ex);
-  }
-  gson = new GsonBuilder()
-  .registerTypeAdapter(Resource.class, new ResourceSerDe())
-  .registerTypeAdapter(RLESparseResourceAllocation.class,
-  new RLESparseResourceAllocationSerDe())
-  .enableComplexMapKeySerialization().create();
-  rleType = new TypeToken() {
-  }.getType();
-  skylineStoreType =
-  new TypeToken>>() {
-  }.getType();
+try {
+  config = new Configuration();
+  config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+  skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+  ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+  ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+  SkylineStore.class);
+  logParser = ResourceEstimatorUtil.createProviderI

hadoop git commit: HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)

2017-11-02 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1516a9ef6 -> 0200fbac5


HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)

(cherry picked from commit 53c0fb7efebfac4a79f5cce2dd42cf00411d51e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0200fbac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0200fbac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0200fbac

Branch: refs/heads/branch-2
Commit: 0200fbac5b8e51786a8c002d2f812f7c85a512ac
Parents: 1516a9e
Author: Arun Suresh 
Authored: Thu Nov 2 17:14:07 2017 -0700
Committer: Arun Suresh 
Committed: Thu Nov 2 17:18:03 2017 -0700

--
 .../service/ResourceEstimatorService.java   | 85 +---
 .../translator/impl/BaseLogParser.java  |  5 +-
 .../translator/impl/LogParserUtil.java  | 11 ++-
 .../service/TestResourceEstimatorService.java   |  2 -
 4 files changed, 53 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0200fbac/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 92e..0e0e094 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -65,51 +65,49 @@ import com.google.inject.Singleton;
 @Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
-  private static SkylineStore skylineStore;
-  private static Solver solver;
-  private static LogParser logParser;
-  private static LogParserUtil logParserUtil = new LogParserUtil();
-  private static Configuration config;
-  private static Gson gson;
-  private static Type rleType;
-  private static Type skylineStoreType;
+  private final SkylineStore skylineStore;
+  private final Solver solver;
+  private final LogParser logParser;
+  private final LogParserUtil logParserUtil = new LogParserUtil();
+  private final Configuration config;
+  private final Gson gson;
+  private final Type rleType;
+  private final Type skylineStoreType;
 
   public ResourceEstimatorService() throws ResourceEstimatorException {
-if (skylineStore == null) {
-  try {
-config = new Configuration();
-config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
-skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
-SkylineStore.class);
-logParser = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
-LogParser.class);
-logParser.init(config, skylineStore);
-logParserUtil.setLogParser(logParser);
-solver = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SOLVER_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SOLVER_PROVIDER,
-Solver.class);
-solver.init(config, skylineStore);
-  } catch (Exception ex) {
-LOGGER
-.error("Server initialization failed due to: {}", ex.getMessage());
-throw new ResourceEstimatorException(ex.getMessage(), ex);
-  }
-  gson = new GsonBuilder()
-  .registerTypeAdapter(Resource.class, new ResourceSerDe())
-  .registerTypeAdapter(RLESparseResourceAllocation.class,
-  new RLESparseResourceAllocationSerDe())
-  .enableComplexMapKeySerialization().create();
-  rleType = new TypeToken() {
-  }.getType();
-  skylineStoreType =
-  new TypeToken>>() {
-  }.getType();
+try {
+  config = new Configuration();
+  config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+  skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+  ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+  ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+  SkylineStore.class);
+  logParser = ResourceEstimatorUtil.createProviderInsta

hadoop git commit: HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)

2017-11-02 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk e6ec02001 -> 53c0fb7ef


HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53c0fb7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53c0fb7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53c0fb7e

Branch: refs/heads/trunk
Commit: 53c0fb7efebfac4a79f5cce2dd42cf00411d51e7
Parents: e6ec020
Author: Arun Suresh 
Authored: Thu Nov 2 17:14:07 2017 -0700
Committer: Arun Suresh 
Committed: Thu Nov 2 17:15:33 2017 -0700

--
 .../service/ResourceEstimatorService.java   | 85 +---
 .../translator/impl/BaseLogParser.java  |  5 +-
 .../translator/impl/LogParserUtil.java  | 11 ++-
 .../service/TestResourceEstimatorService.java   |  2 -
 4 files changed, 53 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c0fb7e/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 92e..0e0e094 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -65,51 +65,49 @@ import com.google.inject.Singleton;
 @Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
-  private static SkylineStore skylineStore;
-  private static Solver solver;
-  private static LogParser logParser;
-  private static LogParserUtil logParserUtil = new LogParserUtil();
-  private static Configuration config;
-  private static Gson gson;
-  private static Type rleType;
-  private static Type skylineStoreType;
+  private final SkylineStore skylineStore;
+  private final Solver solver;
+  private final LogParser logParser;
+  private final LogParserUtil logParserUtil = new LogParserUtil();
+  private final Configuration config;
+  private final Gson gson;
+  private final Type rleType;
+  private final Type skylineStoreType;
 
   public ResourceEstimatorService() throws ResourceEstimatorException {
-if (skylineStore == null) {
-  try {
-config = new Configuration();
-config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
-skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
-SkylineStore.class);
-logParser = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
-LogParser.class);
-logParser.init(config, skylineStore);
-logParserUtil.setLogParser(logParser);
-solver = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SOLVER_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SOLVER_PROVIDER,
-Solver.class);
-solver.init(config, skylineStore);
-  } catch (Exception ex) {
-LOGGER
-.error("Server initialization failed due to: {}", ex.getMessage());
-throw new ResourceEstimatorException(ex.getMessage(), ex);
-  }
-  gson = new GsonBuilder()
-  .registerTypeAdapter(Resource.class, new ResourceSerDe())
-  .registerTypeAdapter(RLESparseResourceAllocation.class,
-  new RLESparseResourceAllocationSerDe())
-  .enableComplexMapKeySerialization().create();
-  rleType = new TypeToken() {
-  }.getType();
-  skylineStoreType =
-  new TypeToken>>() {
-  }.getType();
+try {
+  config = new Configuration();
+  config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+  skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+  ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+  ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+  SkylineStore.class);
+  logParser = ResourceEstimatorUtil.createProviderInstance(config,
+  ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
+

hadoop git commit: HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)

2017-11-02 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3d923d66e -> cbd81f305


HADOOP-15013. Fix ResourceEstimator findbugs issues. (asuresh)

(cherry picked from commit 53c0fb7efebfac4a79f5cce2dd42cf00411d51e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbd81f30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbd81f30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbd81f30

Branch: refs/heads/branch-3.0
Commit: cbd81f305bc5f66c6e32d16f99566acde041c069
Parents: 3d923d6
Author: Arun Suresh 
Authored: Thu Nov 2 17:14:07 2017 -0700
Committer: Arun Suresh 
Committed: Thu Nov 2 17:17:27 2017 -0700

--
 .../service/ResourceEstimatorService.java   | 85 +---
 .../translator/impl/BaseLogParser.java  |  5 +-
 .../translator/impl/LogParserUtil.java  | 11 ++-
 .../service/TestResourceEstimatorService.java   |  2 -
 4 files changed, 53 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbd81f30/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
--
diff --git 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
index 92e..0e0e094 100644
--- 
a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
+++ 
b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ResourceEstimatorService.java
@@ -65,51 +65,49 @@ import com.google.inject.Singleton;
 @Singleton @Path("/resourceestimator") public class ResourceEstimatorService {
   private static final Logger LOGGER =
   LoggerFactory.getLogger(ResourceEstimatorService.class);
-  private static SkylineStore skylineStore;
-  private static Solver solver;
-  private static LogParser logParser;
-  private static LogParserUtil logParserUtil = new LogParserUtil();
-  private static Configuration config;
-  private static Gson gson;
-  private static Type rleType;
-  private static Type skylineStoreType;
+  private final SkylineStore skylineStore;
+  private final Solver solver;
+  private final LogParser logParser;
+  private final LogParserUtil logParserUtil = new LogParserUtil();
+  private final Configuration config;
+  private final Gson gson;
+  private final Type rleType;
+  private final Type skylineStoreType;
 
   public ResourceEstimatorService() throws ResourceEstimatorException {
-if (skylineStore == null) {
-  try {
-config = new Configuration();
-config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
-skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
-SkylineStore.class);
-logParser = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.TRANSLATOR_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_TRANSLATOR_PROVIDER,
-LogParser.class);
-logParser.init(config, skylineStore);
-logParserUtil.setLogParser(logParser);
-solver = ResourceEstimatorUtil.createProviderInstance(config,
-ResourceEstimatorConfiguration.SOLVER_PROVIDER,
-ResourceEstimatorConfiguration.DEFAULT_SOLVER_PROVIDER,
-Solver.class);
-solver.init(config, skylineStore);
-  } catch (Exception ex) {
-LOGGER
-.error("Server initialization failed due to: {}", ex.getMessage());
-throw new ResourceEstimatorException(ex.getMessage(), ex);
-  }
-  gson = new GsonBuilder()
-  .registerTypeAdapter(Resource.class, new ResourceSerDe())
-  .registerTypeAdapter(RLESparseResourceAllocation.class,
-  new RLESparseResourceAllocationSerDe())
-  .enableComplexMapKeySerialization().create();
-  rleType = new TypeToken() {
-  }.getType();
-  skylineStoreType =
-  new TypeToken>>() {
-  }.getType();
+try {
+  config = new Configuration();
+  config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
+  skylineStore = ResourceEstimatorUtil.createProviderInstance(config,
+  ResourceEstimatorConfiguration.SKYLINESTORE_PROVIDER,
+  ResourceEstimatorConfiguration.DEFAULT_SKYLINESTORE_PROVIDER,
+  SkylineStore.class);
+  logParser = ResourceEstimatorUtil.createProviderI

hadoop git commit: YARN-7370: Preemption properties should be refreshable. Contributed by Gergely Novák.

2017-11-02 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2dec52da2 -> 85130d80f


YARN-7370: Preemption properties should be refreshable. Contributed by Gergely 
Novák.

(cherry picked from commit e6ec02001fc4eed9eb51c8653d8f931135e49eda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85130d80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85130d80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85130d80

Branch: refs/heads/branch-2.8
Commit: 85130d80fe730818aea9406669c0a6fb9e9fcddf
Parents: 2dec52d
Author: Eric Payne 
Authored: Thu Nov 2 12:37:33 2017 -0500
Committer: Eric Payne 
Committed: Thu Nov 2 13:38:36 2017 -0500

--
 .../monitor/SchedulingMonitor.java  | 15 +++-
 .../ProportionalCapacityPreemptionPolicy.java   | 75 +++-
 .../CapacitySchedulerConfiguration.java | 11 +--
 ...estProportionalCapacityPreemptionPolicy.java | 42 +--
 .../TestCapacitySchedulerLazyPreemption.java|  2 +-
 5 files changed, 112 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85130d80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 4cc8989..1297d4e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -75,9 +75,13 @@ public class SchedulingMonitor extends AbstractService {
 return t;
   }
 });
+schedulePreemptionChecker();
+super.serviceStart();
+  }
+
+  private void schedulePreemptionChecker() {
 handler = ses.scheduleAtFixedRate(new PreemptionChecker(),
 0, monitorInterval, TimeUnit.MILLISECONDS);
-super.serviceStart();
   }
 
   @Override
@@ -100,8 +104,13 @@ public class SchedulingMonitor extends AbstractService {
 @Override
 public void run() {
   try {
-//invoke the preemption policy
-invokePolicy();
+if (monitorInterval != scheduleEditPolicy.getMonitoringInterval()) {
+  handler.cancel(true);
+  monitorInterval = scheduleEditPolicy.getMonitoringInterval();
+  schedulePreemptionChecker();
+} else {
+  invokePolicy();
+}
   } catch (Throwable t) {
 // The preemption monitor does not alter structures nor do structures
 // persist across invocations. Therefore, log, skip, and retry.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85130d80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index ee6a27d..c46f0be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -107,6 +107,9 @@ public class ProportionalCapacityPreemptionPolicy
   private float minimumThresholdForIntraQueuePreemption;
   private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy;
 
+  // Current configuration
+  private CapacitySchedulerConfiguration csConfig;
+
   // Pointer to other RM components
   private RMContext rmContext;
   private ResourceCalculator rc;
@@ -120,8 +123,7 @@ 

hadoop git commit: YARN-7370: Preemption properties should be refreshable. Contributed by Gergely Novák.

2017-11-02 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 7a658363d -> 9a61d12c0


YARN-7370: Preemption properties should be refreshable. Contributed by Gergely 
Novák.

(cherry picked from commit e6ec02001fc4eed9eb51c8653d8f931135e49eda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a61d12c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a61d12c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a61d12c

Branch: refs/heads/branch-2.9
Commit: 9a61d12c0301704919598ebc93a1e8410fc85562
Parents: 7a65836
Author: Eric Payne 
Authored: Thu Nov 2 12:37:33 2017 -0500
Committer: Eric Payne 
Committed: Thu Nov 2 13:09:27 2017 -0500

--
 .../monitor/SchedulingMonitor.java  | 15 +++-
 .../ProportionalCapacityPreemptionPolicy.java   | 79 +++-
 .../CapacitySchedulerConfiguration.java | 11 +--
 ...estProportionalCapacityPreemptionPolicy.java | 42 +--
 .../TestCapacitySchedulerLazyPreemption.java|  2 +-
 5 files changed, 115 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a61d12c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 631d1a0..2a741ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -73,9 +73,13 @@ public class SchedulingMonitor extends AbstractService {
 return t;
   }
 });
+schedulePreemptionChecker();
+super.serviceStart();
+  }
+
+  private void schedulePreemptionChecker() {
 handler = ses.scheduleAtFixedRate(new PreemptionChecker(),
 0, monitorInterval, TimeUnit.MILLISECONDS);
-super.serviceStart();
   }
 
   @Override
@@ -98,8 +102,13 @@ public class SchedulingMonitor extends AbstractService {
 @Override
 public void run() {
   try {
-//invoke the preemption policy
-invokePolicy();
+if (monitorInterval != scheduleEditPolicy.getMonitoringInterval()) {
+  handler.cancel(true);
+  monitorInterval = scheduleEditPolicy.getMonitoringInterval();
+  schedulePreemptionChecker();
+} else {
+  invokePolicy();
+}
   } catch (Throwable t) {
 // The preemption monitor does not alter structures nor do structures
 // persist across invocations. Therefore, log, skip, and retry.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a61d12c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 1326c52..860b297 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -109,6 +109,9 @@ public class ProportionalCapacityPreemptionPolicy
   private float minimumThresholdForIntraQueuePreemption;
   private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy;
 
+  // Current configuration
+  private CapacitySchedulerConfiguration csConfig;
+
   // Pointer to other RM components
   private RMContext rmContext;
   private ResourceCalculator rc;
@@ -122,8 +125,7 @@ p

hadoop git commit: YARN-7370: Preemption properties should be refreshable. Contributed by Gergely Novák.

2017-11-02 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 752f000e0 -> 1516a9ef6


YARN-7370: Preemption properties should be refreshable. Contributed by Gergely 
Novák.

(cherry picked from commit e6ec02001fc4eed9eb51c8653d8f931135e49eda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1516a9ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1516a9ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1516a9ef

Branch: refs/heads/branch-2
Commit: 1516a9ef61cc9d26fe71154041b1e4624cf9c7ae
Parents: 752f000
Author: Eric Payne 
Authored: Thu Nov 2 12:37:33 2017 -0500
Committer: Eric Payne 
Committed: Thu Nov 2 12:56:41 2017 -0500

--
 .../monitor/SchedulingMonitor.java  | 15 +++-
 .../ProportionalCapacityPreemptionPolicy.java   | 79 +++-
 .../CapacitySchedulerConfiguration.java | 11 +--
 ...estProportionalCapacityPreemptionPolicy.java | 42 +--
 .../TestCapacitySchedulerLazyPreemption.java|  2 +-
 5 files changed, 115 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1516a9ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 631d1a0..2a741ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -73,9 +73,13 @@ public class SchedulingMonitor extends AbstractService {
 return t;
   }
 });
+schedulePreemptionChecker();
+super.serviceStart();
+  }
+
+  private void schedulePreemptionChecker() {
 handler = ses.scheduleAtFixedRate(new PreemptionChecker(),
 0, monitorInterval, TimeUnit.MILLISECONDS);
-super.serviceStart();
   }
 
   @Override
@@ -98,8 +102,13 @@ public class SchedulingMonitor extends AbstractService {
 @Override
 public void run() {
   try {
-//invoke the preemption policy
-invokePolicy();
+if (monitorInterval != scheduleEditPolicy.getMonitoringInterval()) {
+  handler.cancel(true);
+  monitorInterval = scheduleEditPolicy.getMonitoringInterval();
+  schedulePreemptionChecker();
+} else {
+  invokePolicy();
+}
   } catch (Throwable t) {
 // The preemption monitor does not alter structures nor do structures
 // persist across invocations. Therefore, log, skip, and retry.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1516a9ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 1326c52..860b297 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -109,6 +109,9 @@ public class ProportionalCapacityPreemptionPolicy
   private float minimumThresholdForIntraQueuePreemption;
   private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy;
 
+  // Current configuration
+  private CapacitySchedulerConfiguration csConfig;
+
   // Pointer to other RM components
   private RMContext rmContext;
   private ResourceCalculator rc;
@@ -122,8 +125,7 @@ publi

hadoop git commit: YARN-7370: Preemption properties should be refreshable. Contributed by Gergely Novák.

2017-11-02 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fb442e9a7 -> 3d923d66e


YARN-7370: Preemption properties should be refreshable. Contributed by Gergely 
Novák.

(cherry picked from commit e6ec02001fc4eed9eb51c8653d8f931135e49eda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d923d66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d923d66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d923d66

Branch: refs/heads/branch-3.0
Commit: 3d923d66e86807bab343916e4dccf75973194236
Parents: fb442e9
Author: Eric Payne 
Authored: Thu Nov 2 12:37:33 2017 -0500
Committer: Eric Payne 
Committed: Thu Nov 2 12:48:07 2017 -0500

--
 .../monitor/SchedulingMonitor.java  | 15 +++-
 .../ProportionalCapacityPreemptionPolicy.java   | 79 +++-
 .../CapacitySchedulerConfiguration.java | 11 +--
 ...estProportionalCapacityPreemptionPolicy.java | 42 +--
 .../TestCapacitySchedulerLazyPreemption.java|  2 +-
 5 files changed, 115 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d923d66/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 631d1a0..2a741ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -73,9 +73,13 @@ public class SchedulingMonitor extends AbstractService {
 return t;
   }
 });
+schedulePreemptionChecker();
+super.serviceStart();
+  }
+
+  private void schedulePreemptionChecker() {
 handler = ses.scheduleAtFixedRate(new PreemptionChecker(),
 0, monitorInterval, TimeUnit.MILLISECONDS);
-super.serviceStart();
   }
 
   @Override
@@ -98,8 +102,13 @@ public class SchedulingMonitor extends AbstractService {
 @Override
 public void run() {
   try {
-//invoke the preemption policy
-invokePolicy();
+if (monitorInterval != scheduleEditPolicy.getMonitoringInterval()) {
+  handler.cancel(true);
+  monitorInterval = scheduleEditPolicy.getMonitoringInterval();
+  schedulePreemptionChecker();
+} else {
+  invokePolicy();
+}
   } catch (Throwable t) {
 // The preemption monitor does not alter structures nor do structures
 // persist across invocations. Therefore, log, skip, and retry.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d923d66/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index c4c98e2..2c072d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -108,6 +108,9 @@ public class ProportionalCapacityPreemptionPolicy
   private float minimumThresholdForIntraQueuePreemption;
   private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy;
 
+  // Current configuration
+  private CapacitySchedulerConfiguration csConfig;
+
   // Pointer to other RM components
   private RMContext rmContext;
   private ResourceCalculator rc;
@@ -121,8 +124,7 @@ p

hadoop git commit: YARN-7370: Preemption properties should be refreshable. Contributed by Gergely Novák.

2017-11-02 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/trunk 04c604cf1 -> e6ec02001


YARN-7370: Preemption properties should be refreshable. Contributed by Gergely 
Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6ec0200
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6ec0200
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6ec0200

Branch: refs/heads/trunk
Commit: e6ec02001fc4eed9eb51c8653d8f931135e49eda
Parents: 04c604c
Author: Eric Payne 
Authored: Thu Nov 2 12:37:33 2017 -0500
Committer: Eric Payne 
Committed: Thu Nov 2 12:37:33 2017 -0500

--
 .../monitor/SchedulingMonitor.java  | 15 +++-
 .../ProportionalCapacityPreemptionPolicy.java   | 79 +++-
 .../CapacitySchedulerConfiguration.java | 11 +--
 ...estProportionalCapacityPreemptionPolicy.java | 42 +--
 .../TestCapacitySchedulerLazyPreemption.java|  2 +-
 5 files changed, 115 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6ec0200/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index 631d1a0..2a741ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -73,9 +73,13 @@ public class SchedulingMonitor extends AbstractService {
 return t;
   }
 });
+schedulePreemptionChecker();
+super.serviceStart();
+  }
+
+  private void schedulePreemptionChecker() {
 handler = ses.scheduleAtFixedRate(new PreemptionChecker(),
 0, monitorInterval, TimeUnit.MILLISECONDS);
-super.serviceStart();
   }
 
   @Override
@@ -98,8 +102,13 @@ public class SchedulingMonitor extends AbstractService {
 @Override
 public void run() {
   try {
-//invoke the preemption policy
-invokePolicy();
+if (monitorInterval != scheduleEditPolicy.getMonitoringInterval()) {
+  handler.cancel(true);
+  monitorInterval = scheduleEditPolicy.getMonitoringInterval();
+  schedulePreemptionChecker();
+} else {
+  invokePolicy();
+}
   } catch (Throwable t) {
 // The preemption monitor does not alter structures nor do structures
 // persist across invocations. Therefore, log, skip, and retry.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6ec0200/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index c4c98e2..2c072d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -108,6 +108,9 @@ public class ProportionalCapacityPreemptionPolicy
   private float minimumThresholdForIntraQueuePreemption;
   private IntraQueuePreemptionOrderPolicy intraQueuePreemptionOrderPolicy;
 
+  // Current configuration
+  private CapacitySchedulerConfiguration csConfig;
+
   // Pointer to other RM components
   private RMContext rmContext;
   private ResourceCalculator rc;
@@ -121,8 +124,7 @@ public class ProportionalCapacityPreemptionPolicy
   new HashMap<>();
   priv

[43/50] [abbrv] hadoop git commit: MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang zhenyi.

2017-11-02 Thread haibochen
MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang 
zhenyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc3f3eca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc3f3eca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc3f3eca

Branch: refs/heads/YARN-1011
Commit: cc3f3eca409f5e57e540849a80e6448bb4924cc6
Parents: cde56b9
Author: Akira Ajisaka 
Authored: Thu Nov 2 18:32:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 2 18:32:24 2017 +0900

--
 .../java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc3f3eca/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 9bf8e47..2335854 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -68,7 +68,7 @@ class DynamicInputChunk {
* Method to write records into a chunk.
* @param key Key from the listing file.
* @param value Corresponding value from the listing file.
-   * @throws IOException Exception onf failure to write to the file.
+   * @throws IOException Exception on failure to write to the file.
*/
   public void write(Text key, CopyListingFileStatus value) throws IOException {
 writer.append(key, value);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] [abbrv] hadoop git commit: YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo Chen)

2017-11-02 Thread haibochen
YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo 
Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8dba7d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8dba7d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8dba7d0

Branch: refs/heads/YARN-1011
Commit: b8dba7d09a275ad8cbe592a75b554e0e997c6b68
Parents: 75d19c1
Author: Haibo Chen 
Authored: Mon Jul 10 09:55:42 2017 -0700
Committer: Haibo Chen 
Committed: Thu Nov 2 10:07:36 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 36 +--
 .../src/main/resources/yarn-default.xml | 42 ++--
 .../server/api/records/ResourceThresholds.java  | 11 +++-
 .../monitor/ContainersMonitorImpl.java  | 67 +++-
 4 files changed, 124 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dba7d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 1552f6c..e843ad2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1878,17 +1878,39 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
-  /** Overallocation (= allocation based on utilization) configs. */
-  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
-  NM_PREFIX + "overallocation.allocation-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
-  = 0f;
+  /**
+   * General overallocation threshold if no resource-type-specific
+   * threshold is provided.
+   */
+  public static final String NM_OVERALLOCATION_GENERAL_THRESHOLD =
+  NM_PREFIX + "overallocation.general-utilization-threshold";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_GENERAL_THRESHOLD = -1.0f;
+  /**
+   * The maximum value of utilization threshold for all resource types
+   * up to which the scheduler allocates OPPORTUNISTIC containers.
+   */
   @Private
-  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final float MAX_NM_OVERALLOCATION_THRESHOLD = 0.95f;
+
+  /**
+   * NM CPU utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_CPU_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.cpu-utilization-threshold";
+
+  /**
+   * NM memory utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.memory-utilization-threshold";
+
   public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
   NM_PREFIX + "overallocation.preemption-threshold";
   public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0f;
+  = 0.96f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8dba7d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 5f4a89c..fdae629 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1621,14 +1621,44 @@
 
   
 The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node 
that
+  applies to all resource types (expressed as a float between 0 and 0.95).
+  By default, over-allocation is turned off (value = -1). When turned on,
+  the node allows running OPPORTUNISTIC containers when the aggregate
+  utilization for each resource type is under the value specified here
+  multiplied by the node's advertised cap

[32/50] [abbrv] hadoop git commit: HDFS-12219. Javadoc for FSNamesystem#getMaxObjects is incorrect. Contributed by Erik Krogen.

2017-11-02 Thread haibochen
HDFS-12219. Javadoc for FSNamesystem#getMaxObjects is incorrect. Contributed by 
Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20304b91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20304b91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20304b91

Branch: refs/heads/YARN-1011
Commit: 20304b91cc1513e3d82a01d36f4ee9c4c81b60e4
Parents: b8c8b5b
Author: Yiqun Lin 
Authored: Wed Nov 1 14:37:08 2017 +0800
Committer: Yiqun Lin 
Committed: Wed Nov 1 14:37:08 2017 +0800

--
 .../java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20304b91/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e8d7161..4e9a05d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4606,9 +4606,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 }
   }
 
-  /**
-   * Get the total number of objects in the system. 
-   */
   @Override // FSNamesystemMBean
   public long getMaxObjects() {
 return maxFsObjects;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/50] [abbrv] hadoop git commit: HADOOP-14997. Add hadoop-aliyun as dependency of hadoop-cloud-storage. Contributed by Genmao Yu

2017-11-02 Thread haibochen
HADOOP-14997. Add hadoop-aliyun as dependency of hadoop-cloud-storage. 
Contributed by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cde56b9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cde56b9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cde56b9c

Branch: refs/heads/YARN-1011
Commit: cde56b9cefe1eb2943eef56a6aa7fdfa1b78e909
Parents: 178751e
Author: Sammi Chen 
Authored: Thu Nov 2 14:26:16 2017 +0800
Committer: Sammi Chen 
Committed: Thu Nov 2 17:12:04 2017 +0800

--
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cde56b9c/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 9711e52..73a9d41 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -105,6 +105,11 @@
 
 
   org.apache.hadoop
+  hadoop-aliyun
+  compile
+
+
+  org.apache.hadoop
   hadoop-aws
   compile
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[37/50] [abbrv] hadoop git commit: YARN-7396. NPE when accessing container logs due to null dirsHandler. Contributed by Jonathan Hung

2017-11-02 Thread haibochen
YARN-7396. NPE when accessing container logs due to null dirsHandler. 
Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cc98ae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cc98ae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cc98ae0

Branch: refs/heads/YARN-1011
Commit: 0cc98ae0ec69419ded066f3f7decf59728b35e9d
Parents: 7a49ddf
Author: Jian He 
Authored: Wed Nov 1 17:00:32 2017 -0700
Committer: Jian He 
Committed: Wed Nov 1 17:00:32 2017 -0700

--
 .../org/apache/hadoop/yarn/server/nodemanager/NodeManager.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cc98ae0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 7a6106e..bddc7c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -367,6 +367,8 @@ public class NodeManager extends CompositeService
 
 this.aclsManager = new ApplicationACLsManager(conf);
 
+this.dirsHandler = new LocalDirsHandlerService(metrics);
+
 boolean isDistSchedulingEnabled =
 conf.getBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED,
 YarnConfiguration.DEFAULT_DIST_SCHEDULING_ENABLED);
@@ -390,7 +392,6 @@ public class NodeManager extends CompositeService
 // NodeManager level dispatcher
 this.dispatcher = new AsyncDispatcher("NM Event dispatcher");
 
-dirsHandler = new LocalDirsHandlerService(metrics);
 nodeHealthChecker =
 new NodeHealthCheckerService(
 getNodeHealthScriptRunner(conf), dirsHandler);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[40/50] [abbrv] hadoop git commit: MAPREDUCE-6983. Moving logging APIs over to slf4j in hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/178751ed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index b1cb6dc..46e4f1a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -27,8 +27,6 @@ import org.apache.avro.io.Encoder;
 import org.apache.avro.io.EncoderFactory;
 import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.avro.util.Utf8;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -36,6 +34,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.CounterGroup;
 import org.apache.hadoop.mapreduce.Counters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -55,7 +55,7 @@ public class EventWriter {
   private DatumWriter writer =
 new SpecificDatumWriter(Event.class);
   private Encoder encoder;
-  private static final Log LOG = LogFactory.getLog(EventWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(EventWriter.class);
 
   /**
* avro encoding format supported by EventWriter.
@@ -112,7 +112,7 @@ public class EventWriter {
   out.close();
   out = null;
 } finally {
-  IOUtils.cleanup(LOG, out);
+  IOUtils.cleanupWithLogger(LOG, out);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/178751ed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
index 28fcc92..6efb4f7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
@@ -24,8 +24,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -45,6 +43,8 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Default Parser for the JobHistory files. Typical usage is
@@ -56,7 +56,8 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 @InterfaceStability.Unstable
 public class JobHistoryParser implements HistoryEventHandler {
 
-  private static final Log LOG = LogFactory.getLog(JobHistoryParser.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(JobHistoryParser.class);
   
   private final FSDataInputStream in;
   private JobInfo info = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/178751ed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
index 325c375..dd1ebdb 10

[50/50] [abbrv] hadoop git commit: YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)

2017-11-02 Thread haibochen
YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75d19c1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75d19c1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75d19c1c

Branch: refs/heads/YARN-1011
Commit: 75d19c1c8ad9c46f3633bfc57a645ba27d7a4115
Parents: 04c604c
Author: Karthik Kambatla 
Authored: Fri Jan 29 14:31:45 2016 -0800
Committer: Haibo Chen 
Committed: Thu Nov 2 10:07:36 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 ++-
 .../src/main/resources/yarn-default.xml |  21 
 .../RegisterNodeManagerRequest.java |  14 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java|  45 +++-
 .../server/api/records/OverAllocationInfo.java  |  45 
 .../server/api/records/ResourceThresholds.java  |  45 
 .../impl/pb/OverAllocationInfoPBImpl.java   | 106 +++
 .../impl/pb/ResourceThresholdsPBImpl.java   |  93 
 .../yarn_server_common_service_protos.proto |  10 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  17 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../monitor/ContainersMonitorImpl.java  |  34 ++
 .../amrmproxy/BaseAMRMProxyTest.java|  11 ++
 14 files changed, 455 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75d19c1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 640e86e..1552f6c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1871,7 +1871,6 @@ public class YarnConfiguration extends Configuration {
   public static final boolean 
DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
   false;
 
-
   // Configurations for applicaiton life time monitor feature
   public static final String RM_APPLICATION_MONITOR_INTERVAL_MS =
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
@@ -1879,6 +1878,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
+  /** Overallocation (= allocation based on utilization) configs. */
+  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
+  NM_PREFIX + "overallocation.allocation-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
+  = 0f;
+  @Private
+  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
+  = 0f;
+
   /**
* Interval of time the linux container executor should try cleaning up
* cgroups entry when cleaning up a container. This is required due to what 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75d19c1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index f4b2e61..5f4a89c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1620,6 +1620,27 @@
   
 
   
+The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node,
+  expressed as a float between 0 and 0.95. By default, over-allocation is
+  turned off (value = 0). When turned on, the node allows running
+  OPPORTUNISTIC containers when the aggregate utilization is under the
+  value specified here multiplied by the node's advertised capacity.
+
+yarn.nodemanager.overallocation.allocation-threshold
+0f
+  
+
+  
+When a node is over-allocated to improve utilization by
+  running OPPORTUNISTIC containers, this confi

[33/50] [abbrv] hadoop git commit: HDFS-12744. More logs when short-circuit read is failed and disabled. Contributed by Weiwei Yang.

2017-11-02 Thread haibochen
HDFS-12744. More logs when short-circuit read is failed and disabled. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56b88b06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56b88b06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56b88b06

Branch: refs/heads/YARN-1011
Commit: 56b88b06705441f6f171eec7fb2fa77946ca204b
Parents: 20304b9
Author: Weiwei Yang 
Authored: Wed Nov 1 16:41:45 2017 +0800
Committer: Weiwei Yang 
Committed: Wed Nov 1 16:41:45 2017 +0800

--
 .../org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java| 3 ++-
 .../java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java  | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b88b06/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 5a22c33..6f3fc61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -646,7 +646,8 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
 default:
   LOG.warn(this + ": unknown response code " + resp.getStatus() +
   " while attempting to set up short-circuit access. " +
-  resp.getMessage());
+  resp.getMessage() + ". Disabling short-circuit read for DataNode "
+  + datanode + " temporarily.");
   clientContext.getDomainSocketFactory()
   .disableShortCircuitForPath(pathInfo.getPath());
   return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b88b06/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 3216a78..b78fc9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -387,6 +387,8 @@ class DataXceiver extends Receiver implements Runnable {
   } catch (IOException e) {
 bld.setStatus(ERROR);
 bld.setMessage(e.getMessage());
+LOG.error("Request short-circuit read file descriptor" +
+" failed with unknown error.", e);
   }
   bld.build().writeDelimitedTo(socketOut);
   if (fis != null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: MAPREDUCE-6983. Moving logging APIs over to slf4j in hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/178751ed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
index 2e2dbe1..588500c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.mapreduce.jobhistory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapred.TaskAttemptID;
 import org.apache.hadoop.mapred.TaskID;
 import org.apache.hadoop.mapred.TaskStatus;
@@ -30,6 +28,8 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.skyscreamer.jsonassert.JSONAssert;
 import org.skyscreamer.jsonassert.JSONCompareMode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
@@ -38,8 +38,8 @@ import java.util.TimeZone;
 
 public class TestHistoryViewerPrinter {
 
-  private static final Log LOG = LogFactory.getLog(
-  TestHistoryViewerPrinter.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestHistoryViewerPrinter.class);
 
   @Test
   public void testHumanPrinter() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/178751ed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
index 7e72802..4c847fa 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
@@ -28,8 +28,6 @@ import javax.annotation.Nullable;
 
 import org.junit.Assert;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
@@ -48,6 +46,8 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
@@ -57,7 +57,8 @@ import com.google.common.collect.Sets;
 @RunWith(value = Parameterized.class)
 public class TestFileInputFormat {
   
-  private static final Log LOG = LogFactory.getLog(TestFileInputFormat.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestFileInputFormat.class);
   
   private static String testTmpDir = System.getProperty("test.build.data", 
"/tmp");
   private static final Path TEST_ROOT_DIR = new Path(testTmpDir, "TestFIF");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/178751ed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index abbfcb2..f72aa55 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
@@ -3

[34/50] [abbrv] hadoop git commit: YARN-7276 addendum to add timeline service dependencies. Contributed by Inigo Goiri.

2017-11-02 Thread haibochen
YARN-7276 addendum to add timeline service dependencies. Contributed by Inigo 
Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70f1a947
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70f1a947
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70f1a947

Branch: refs/heads/YARN-1011
Commit: 70f1a9470cd7a08e7b48df99e16e57ed6f2c13dc
Parents: 56b88b0
Author: Inigo Goiri 
Authored: Wed Nov 1 13:26:37 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Nov 1 13:26:37 2017 -0700

--
 .../hadoop-yarn-server/hadoop-yarn-server-router/pom.xml   | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70f1a947/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
index 3e7cc11..edfc8ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
@@ -86,6 +86,12 @@
 
 
 
+  org.apache.hadoop
+  hadoop-yarn-server-timelineservice
+  test
+
+
+
   org.mockito
   mockito-all
   test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: MAPREDUCE-6983. Moving logging APIs over to slf4j in hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.

2017-11-02 Thread haibochen
MAPREDUCE-6983. Moving logging APIs over to slf4j in 
hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/178751ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/178751ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/178751ed

Branch: refs/heads/YARN-1011
Commit: 178751ed8c9d47038acf8616c226f1f52e884feb
Parents: 940ffe3
Author: Akira Ajisaka 
Authored: Thu Nov 2 17:42:52 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 2 17:43:08 2017 +0900

--
 .../org/apache/hadoop/mapred/BackupStore.java |  7 ---
 .../org/apache/hadoop/mapred/CleanupQueue.java|  8 
 .../java/org/apache/hadoop/mapred/Counters.java   |  4 ++--
 .../DeprecatedQueueConfigurationParser.java   |  8 
 .../org/apache/hadoop/mapred/FileInputFormat.java |  8 
 .../apache/hadoop/mapred/FileOutputCommitter.java |  6 +++---
 .../main/java/org/apache/hadoop/mapred/IFile.java |  6 +++---
 .../apache/hadoop/mapred/IFileInputStream.java|  7 ---
 .../java/org/apache/hadoop/mapred/IndexCache.java |  6 +++---
 .../org/apache/hadoop/mapred/JobACLsManager.java  |  6 +++---
 .../java/org/apache/hadoop/mapred/JobConf.java|  6 +++---
 .../org/apache/hadoop/mapred/JobEndNotifier.java  |  8 
 .../java/org/apache/hadoop/mapred/JvmContext.java |  8 
 .../apache/hadoop/mapred/LineRecordReader.java|  8 
 .../java/org/apache/hadoop/mapred/MapTask.java|  7 ---
 .../java/org/apache/hadoop/mapred/Merger.java |  6 +++---
 .../main/java/org/apache/hadoop/mapred/Queue.java | 10 +-
 .../hadoop/mapred/QueueConfigurationParser.java   |  8 
 .../org/apache/hadoop/mapred/QueueManager.java|  6 +++---
 .../java/org/apache/hadoop/mapred/ReduceTask.java |  9 +
 .../org/apache/hadoop/mapred/SortedRanges.java|  8 
 .../main/java/org/apache/hadoop/mapred/Task.java  | 18 +-
 .../java/org/apache/hadoop/mapred/TaskLog.java| 11 +--
 .../java/org/apache/hadoop/mapred/TaskStatus.java |  8 
 .../org/apache/hadoop/mapred/jobcontrol/Job.java  |  6 +++---
 .../mapred/lib/FieldSelectionMapReduce.java   |  7 ---
 .../apache/hadoop/mapred/lib/InputSampler.java|  6 +++---
 .../hadoop/mapred/lib/MultithreadedMapRunner.java |  8 
 .../apache/hadoop/mapred/pipes/Application.java   |  7 ---
 .../hadoop/mapred/pipes/BinaryProtocol.java   |  8 
 .../apache/hadoop/mapred/pipes/PipesReducer.java  |  7 ---
 .../org/apache/hadoop/mapred/pipes/Submitter.java |  6 +++---
 .../java/org/apache/hadoop/mapreduce/Cluster.java |  7 ---
 .../org/apache/hadoop/mapreduce/CryptoUtils.java  |  6 +++---
 .../java/org/apache/hadoop/mapreduce/Job.java |  6 +++---
 .../hadoop/mapreduce/JobResourceUploader.java |  7 ---
 .../hadoop/mapreduce/JobSubmissionFiles.java  |  7 ---
 .../org/apache/hadoop/mapreduce/JobSubmitter.java | 11 +--
 .../mapreduce/counters/AbstractCounters.java  |  7 ---
 .../counters/FileSystemCounterGroup.java  |  7 ---
 .../mapreduce/counters/FrameworkCounterGroup.java |  7 ---
 .../hadoop/mapreduce/jobhistory/EventWriter.java  |  8 
 .../mapreduce/jobhistory/JobHistoryParser.java|  7 ---
 .../mapreduce/lib/db/BigDecimalSplitter.java  |  7 ---
 .../hadoop/mapreduce/lib/db/DBInputFormat.java|  7 ---
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java   |  7 ---
 .../hadoop/mapreduce/lib/db/DBRecordReader.java   |  7 ---
 .../mapreduce/lib/db/DataDrivenDBInputFormat.java |  7 ---
 .../lib/db/DataDrivenDBRecordReader.java  |  7 ---
 .../hadoop/mapreduce/lib/db/DateSplitter.java |  6 +++---
 .../hadoop/mapreduce/lib/db/FloatSplitter.java|  7 ---
 .../mapreduce/lib/db/OracleDBRecordReader.java|  7 ---
 .../lib/db/OracleDataDrivenDBInputFormat.java |  3 ---
 .../hadoop/mapreduce/lib/db/TextSplitter.java |  6 +++---
 .../lib/fieldsel/FieldSelectionMapper.java|  7 ---
 .../lib/fieldsel/FieldSelectionReducer.java   |  7 ---
 .../lib/input/CombineFileInputFormat.java |  7 ---
 .../mapreduce/lib/input/FileInputFormat.java  |  7 ---
 .../lib/input/FixedLengthRecordReader.java|  8 
 .../mapreduce/lib/input/LineRecordReader.java |  7 ---
 .../lib/input/SequenceFileInputFilter.java|  9 +
 .../mapreduce/lib/jobcontrol/ControlledJob.java   |  7 ---
 .../mapreduce/lib/jobcontrol/JobControl.java  |  6 +++---
 .../mapreduce/lib/map/MultithreadedMapper.java|  7 ---
 .../mapreduce/lib/output/FileOutputCommitter.java |  7 ---
 .../lib/output/PartialFileOutputCommitter.java|  8 
 .../mapreduce/lib/partition/InputSampler.java |  6 +++---
 .../lib/partition

[47/50] [abbrv] hadoop git commit: YARN-4511. Common scheduler changes to support scheduler-specific oversubscription implementations.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4cfabf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index 740ef33..c56be29 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -563,7 +563,7 @@ public class TestNodeLabelContainerAllocation {
   int numContainers) {
 CapacityScheduler cs = (CapacityScheduler) 
rm.getRMContext().getScheduler();
 SchedulerNode node = cs.getSchedulerNode(nodeId);
-Assert.assertEquals(numContainers, node.getNumContainers());
+Assert.assertEquals(numContainers, node.getNumGuaranteedContainers());
   }
 
   /**
@@ -1065,7 +1065,7 @@ public class TestNodeLabelContainerAllocation {
 for (int i = 0; i < 50; i++) {
   cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
   cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
-  if (schedulerNode1.getNumContainers() == 0) {
+  if (schedulerNode1.getNumGuaranteedContainers() == 0) {
 cycleWaited++;
   }
 }
@@ -1131,7 +1131,7 @@ public class TestNodeLabelContainerAllocation {
 CSAMContainerLaunchDiagnosticsConstants.LAST_NODE_PROCESSED_MSG
 + nodeIdStr + " ( Partition : [x]"));
 Assert.assertEquals(0, cs.getSchedulerNode(nm1.getNodeId())
-.getNumContainers());
+.getNumGuaranteedContainers());
 
 rm1.close();
   }
@@ -1215,7 +1215,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(10, schedulerNode1.getNumContainers());
+Assert.assertEquals(10, schedulerNode1.getNumGuaranteedContainers());
 
 // check non-exclusive containers of LeafQueue is correctly updated
 LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
@@ -1943,7 +1943,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(5, schedulerNode1.getNumContainers());
+Assert.assertEquals(5, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2043,7 +2043,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x (non-exclusive)
-Assert.assertEquals(3, schedulerNode1.getNumContainers());
+Assert.assertEquals(3, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2074,7 +2074,7 @@ public class TestNodeLabelContainerAllocation {
 cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
 // app1 gets all resource in default partition
-Assert.assertEquals(2, schedulerNode2.getNumContainers());
+Assert.assertEquals(2, schedulerNode2.getNumGuaranteedContainers());
 
 // 3GB is used from label x quota. 2GB used from default label.
 // So total 2.5 GB is remaining.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4cfabf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 854a65c..d432991 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/had

[36/50] [abbrv] hadoop git commit: YARN-7412. Fix unit test for docker mount check on ubuntu. (Contributed by Eric Badger)

2017-11-02 Thread haibochen
YARN-7412. Fix unit test for docker mount check on ubuntu.  (Contributed by 
Eric Badger)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a49ddfd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a49ddfd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a49ddfd

Branch: refs/heads/YARN-1011
Commit: 7a49ddfdde2e2a7b407f4a62a42d97bfe456075a
Parents: d57dba9
Author: Eric Yang 
Authored: Wed Nov 1 18:39:56 2017 -0400
Committer: Eric Yang 
Committed: Wed Nov 1 18:39:56 2017 -0400

--
 .../test/utils/test_docker_util.cc  | 62 ++--
 1 file changed, 31 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a49ddfd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
index 80de58d..96b5d40 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test_docker_util.cc
@@ -429,12 +429,12 @@ namespace ContainerExecutor {
   }
 
   TEST_F(TestDockerUtil, test_check_mount_permitted) {
-const char *permitted_mounts[] = {"/etc", "/usr/bin/touch", "/tmp/", NULL};
+const char *permitted_mounts[] = {"/etc", "/usr/bin/cut", "/tmp/", NULL};
 std::vector > test_data;
 test_data.push_back(std::make_pair("/etc", 1));
 test_data.push_back(std::make_pair("/etc/", 1));
 test_data.push_back(std::make_pair("/etc/passwd", 1));
-test_data.push_back(std::make_pair("/usr/bin/touch", 1));
+test_data.push_back(std::make_pair("/usr/bin/cut", 1));
 test_data.push_back(std::make_pair("//usr/", 0));
 test_data.push_back(std::make_pair("/etc/random-file", 
-1));
 
@@ -447,8 +447,8 @@ namespace ContainerExecutor {
 
   TEST_F(TestDockerUtil, test_normalize_mounts) {
 const int entries = 4;
-const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/touch", NULL};
-const char *expected[] = {"/home/", "/etc/", "/usr/bin/touch", NULL};
+const char *permitted_mounts[] = {"/home", "/etc", "/usr/bin/cut", NULL};
+const char *expected[] = {"/home/", "/etc/", "/usr/bin/cut", NULL};
 char **ptr = static_cast(malloc(entries * sizeof(char *)));
 for (int i = 0; i < entries; ++i) {
   if (permitted_mounts[i] != NULL) {
@@ -660,7 +660,7 @@ namespace ContainerExecutor {
 const int buff_len = 1024;
 char buff[buff_len];
 int ret = 0;
-std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/touch,..\n  "
+std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/opt,/var,/usr/bin/cut,..\n  "
   
"docker.allowed.ro-mounts=/etc/passwd";
 std::vector > file_cmd_vec;
 file_cmd_vec.push_back(std::make_pair(
@@ -668,8 +668,8 @@ namespace ContainerExecutor {
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/var/:/var/", "-v '/var/:/var/' "));
 file_cmd_vec.push_back(std::make_pair(
-"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/touch:/usr/bin/touch",
- "-v '/usr/bin/touch:/usr/bin/touch' "));
+"[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/usr/bin/cut:/usr/bin/cut",
+ "-v '/usr/bin/cut:/usr/bin/cut' "));
 file_cmd_vec.push_back(std::make_pair(
 "[docker-command-execution]\n  docker-command=run\n  
rw-mounts=/opt:/mydisk1,/var/log/:/mydisk2",
 "-v '/opt:/mydisk1' -v '/var/log/:/mydisk2' "));
@@ -767,7 +767,7 @@ namespace ContainerExecutor {
 char buff[buff_len];
 int ret = 0;
 
-std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/home/,/var,/usr/bin/touch,..\n  "
+std::string container_executor_cfg_contents = "[docker]\n  
docker.allowed.rw-mounts=/home/,/var,/usr/bin/cut,..\n  "
   
"docker.allowed.ro-mounts=/etc/passwd,/etc/group";
 std::vector > file_cmd_vec;
 file_cmd_vec.push_back(std::make_pair(
@@ -779,8 +779,8 @@ namespace ContainerExecutor {
 file_cmd_vec

[45/50] [abbrv] hadoop git commit: YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via wangda)

2017-11-02 Thread haibochen
YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via 
wangda)

Change-Id: Ia5336f407147d2985a61b5f9f1e69b5b35f398a4
(cherry picked from commit 752f000e0f521ca7be173d6ca338da4585284fd0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04c604cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04c604cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04c604cf

Branch: refs/heads/YARN-1011
Commit: 04c604cf1d4e2c52b01a224e9d4a9888a3d8c740
Parents: d00b6f7
Author: Wangda Tan 
Authored: Thu Nov 2 09:50:00 2017 -0700
Committer: Wangda Tan 
Committed: Thu Nov 2 09:51:28 2017 -0700

--
 .../src/main/webapp/app/routes/cluster-overview.js  | 5 -
 .../src/main/webapp/app/utils/href-address-utils.js | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c604cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
index 3c6abd4..d03ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
@@ -28,7 +28,10 @@ export default AbstractRoute.extend({
 {
   state: "RUNNING"
 }),
-  queues: this.store.query('yarn-queue.yarn-queue', {}),
+  queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => {
+let type = model.get('firstObject').get('type');
+return this.store.query("yarn-queue." + type + "-queue", {});
+  }),
 });
   },
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c604cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
index e36de4b..896d448 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
@@ -22,6 +22,6 @@ export default {
   },
 
   getQueueLink: function(queueName) {
-return '#/yarn-queue/' + queueName;
+return '#/yarn-queue/' + queueName + '/info';
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: YARN-7407. Moving logging APIs over to slf4j in hadoop-yarn-applications. Contributed by Yeliang Cang.

2017-11-02 Thread haibochen
YARN-7407. Moving logging APIs over to slf4j in hadoop-yarn-applications. 
Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/785f1b0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/785f1b0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/785f1b0d

Branch: refs/heads/YARN-1011
Commit: 785f1b0d11a3bf0af9851c080ff0acc34539f17b
Parents: fad22d8
Author: Akira Ajisaka 
Authored: Tue Oct 31 16:36:02 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 31 16:36:02 2017 +0900

--
 .../distributedshell/ApplicationMaster.java   | 13 +++--
 .../yarn/applications/distributedshell/Client.java|  9 +
 .../ContainerLaunchFailAppMaster.java | 10 +-
 .../distributedshell/TestDSFailedAppMaster.java   |  7 ---
 .../distributedshell/TestDSSleepingAppMaster.java |  8 +---
 .../distributedshell/TestDistributedShell.java| 14 +++---
 .../TestDistributedShellWithNodeLabels.java   |  8 
 .../unmanagedamlauncher/UnmanagedAMLauncher.java  |  9 +
 .../unmanagedamlauncher/TestUnmanagedAMLauncher.java  | 12 ++--
 9 files changed, 48 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/785f1b0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 6d838c0..91dbc00 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -47,8 +47,6 @@ import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -113,6 +111,8 @@ import org.apache.log4j.LogManager;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An ApplicationMaster for executing shell commands on a set of launched
@@ -179,7 +179,8 @@ import com.sun.jersey.api.client.ClientHandlerException;
 @InterfaceStability.Unstable
 public class ApplicationMaster {
 
-  private static final Log LOG = LogFactory.getLog(ApplicationMaster.class);
+  private static final Logger LOG = LoggerFactory
+  .getLogger(ApplicationMaster.class);
 
   @VisibleForTesting
   @Private
@@ -349,7 +350,7 @@ public class ApplicationMaster {
   appMaster.run();
   result = appMaster.finish();
 } catch (Throwable t) {
-  LOG.fatal("Error running ApplicationMaster", t);
+  LOG.error("Error running ApplicationMaster", t);
   LogManager.shutdown();
   ExitUtil.terminate(1, t);
 }
@@ -388,7 +389,7 @@ public class ApplicationMaster {
 } catch (IOException e) {
   e.printStackTrace();
 } finally {
-  IOUtils.cleanup(LOG, buf);
+  IOUtils.cleanupWithLogger(LOG, buf);
 }
   }
 
@@ -630,7 +631,7 @@ public class ApplicationMaster {
 LOG.info("Executing with tokens:");
 while (iter.hasNext()) {
   Token token = iter.next();
-  LOG.info(token);
+  LOG.info(token.toString());
   if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
 iter.remove();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/785f1b0d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/

[30/50] [abbrv] hadoop git commit: HDFS-12482. Provide a configuration to adjust the weight of EC recovery tasks to adjust the speed of recovery. (lei)

2017-11-02 Thread haibochen
HDFS-12482. Provide a configuration to adjust the weight of EC recovery tasks 
to adjust the speed of recovery. (lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9367c25d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9367c25d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9367c25d

Branch: refs/heads/YARN-1011
Commit: 9367c25dbdfedf60cdbd65611281cf9c667829e6
Parents: ed24da3
Author: Lei Xu 
Authored: Tue Oct 31 21:58:14 2017 -0700
Committer: Lei Xu 
Committed: Tue Oct 31 21:58:14 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../server/datanode/DataNodeFaultInjector.java  |  6 ++
 .../erasurecode/ErasureCodingWorker.java| 12 +++-
 .../erasurecode/StripedBlockReconstructor.java  |  2 +
 .../src/main/resources/hdfs-default.xml | 13 
 .../src/site/markdown/HDFSErasureCoding.md  |  6 ++
 .../hadoop/hdfs/TestReconstructStripedFile.java | 64 
 7 files changed, 106 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9367c25d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3d1f0b6..37071b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -596,6 +596,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT = 5000; //5s
   public static final String  DFS_DN_EC_RECONSTRUCTION_THREADS_KEY = 
"dfs.datanode.ec.reconstruction.threads";
   public static final int DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT = 8;
+  public static final String  DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_KEY =
+  "dfs.datanode.ec.reconstruction.xmits.weight";
+  public static final float   DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_DEFAULT =
+  0.5f;
 
   public static final String
   DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9367c25d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0a2a60b..1dd779e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -89,4 +89,10 @@ public class DataNodeFaultInjector {
 
   public void throwTooManyOpenFiles() throws FileNotFoundException {
   }
+
+  /**
+   * Used as a hook to inject failure in erasure coding reconstruction
+   * process.
+   */
+  public void stripedBlockReconstruction() throws IOException {}
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9367c25d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 63498bc..45e29ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.erasurecode;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -47,6 +48,7 @@ public final class ErasureCodingWorker {
 
   private final DataNode datanode;
   private final Configuration conf;
+  private final float xmitWeight;
 
   private ThreadPoolExecutor stripedReconstructionPool;
   private ThreadPoolExecutor stripedReadPool;
@@ -54,6 +56,14 @@ public final cl

[28/50] [abbrv] hadoop git commit: Revert "HDFS-12499. dfs.namenode.shared.edits.dir property is currently namenode specific key. Contributed by Bharat Viswanadham."

2017-11-02 Thread haibochen
Revert "HDFS-12499. dfs.namenode.shared.edits.dir property is currently 
namenode specific key. Contributed by Bharat Viswanadham."

This reverts commit b922ba7393bd97b98e90f50f01b4cc664c44adb9.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f681fa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f681fa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f681fa8

Branch: refs/heads/YARN-1011
Commit: 5f681fa8216fb43dff8a3d21bf21e91d6c6f6d9c
Parents: b922ba7
Author: Andrew Wang 
Authored: Tue Oct 31 10:46:10 2017 -0700
Committer: Andrew Wang 
Committed: Tue Oct 31 10:46:10 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java| 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java| 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f681fa8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6125dea..32b873b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -248,6 +248,7 @@ public class NameNode extends ReconfigurableBase implements
 DFS_NAMENODE_RPC_BIND_HOST_KEY,
 DFS_NAMENODE_NAME_DIR_KEY,
 DFS_NAMENODE_EDITS_DIR_KEY,
+DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
 DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
@@ -277,8 +278,7 @@ public class NameNode extends ReconfigurableBase implements
* for a specific namenode.
*/
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
-  DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
-  DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
 
   private String ipcClientRPCBackoffEnable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f681fa8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 64d2322..39f76a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -402,7 +402,7 @@ public class TestDFSUtil {
   public void testSomeConfsNNSpecificSomeNSSpecific() {
 final HdfsConfiguration conf = new HdfsConfiguration();
 
-String key = DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 conf.set(key, "global-default");
 conf.set(key + ".ns1", "ns1-override");
 conf.set(key + ".ns1.nn1", "nn1-override");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/50] [abbrv] hadoop git commit: addendum patch for YARN-7289.

2017-11-02 Thread haibochen
addendum patch for YARN-7289.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/940ffe3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/940ffe3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/940ffe3f

Branch: refs/heads/YARN-1011
Commit: 940ffe3f9ce74286a8863e3743faf88a33c817a3
Parents: 0cc98ae
Author: Rohith Sharma K S 
Authored: Thu Nov 2 13:55:19 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Nov 2 13:55:19 2017 +0530

--
 .../scheduler/AbstractYarnScheduler.java|  4 ++--
 .../scheduler/capacity/CapacityScheduler.java   |  2 +-
 .../rmapp/TestApplicationLifetimeMonitor.java   | 24 +++-
 3 files changed, 16 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/940ffe3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index f2da1fe..8ce6eb8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1351,8 +1351,8 @@ public abstract class AbstractYarnScheduler
 
   @Override
   public long checkAndGetApplicationLifetime(String queueName, long lifetime) {
-// -1 indicates, lifetime is not configured.
-return -1;
+// Lifetime is the application lifetime by default.
+return lifetime;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/940ffe3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d91aa55..ca289b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2591,7 +2591,7 @@ public class CapacityScheduler extends
   long maximumApplicationLifetime =
   ((LeafQueue) queue).getMaximumApplicationLifetime();
 
-  // check only for maximum, that's enough because default cann't
+  // check only for maximum, that's enough because default can't
   // exceed maximum
   if (maximumApplicationLifetime <= 0) {
 return lifetimeRequestedByApp;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/940ffe3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
index 4f88480..e1d8716 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rm

[31/50] [abbrv] hadoop git commit: HDFS-12714. Hadoop 3 missing fix for HDFS-5169. Contributed by Joe McDonnell.

2017-11-02 Thread haibochen
HDFS-12714. Hadoop 3 missing fix for HDFS-5169. Contributed by Joe McDonnell.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8c8b5bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8c8b5bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8c8b5bc

Branch: refs/heads/YARN-1011
Commit: b8c8b5bc274211b29be125e5463662795a363f84
Parents: 9367c25
Author: John Zhuge 
Authored: Tue Oct 31 21:33:26 2017 -0700
Committer: John Zhuge 
Committed: Tue Oct 31 22:44:16 2017 -0700

--
 .../hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8c8b5bc/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index 5b8bc7f..55fef24 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -2688,7 +2688,7 @@ static int translateZCRException(JNIEnv *env, jthrowable 
exc)
 ret = EPROTONOSUPPORT;
 goto done;
 }
-ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+ret = printExceptionAndFree(env, exc, PRINT_EXC_ALL,
 "hadoopZeroCopyRead: ZeroCopyCursor#read failed");
 done:
 free(className);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] [abbrv] hadoop git commit: HDFS-12699. TestMountTable fails with Java 7. Contributed by Inigo Goiri.

2017-11-02 Thread haibochen
HDFS-12699. TestMountTable fails with Java 7. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/982bd2a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/982bd2a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/982bd2a5

Branch: refs/heads/YARN-1011
Commit: 982bd2a5bff4c490a5d62f2e4fd0d3755305349c
Parents: d015e0b
Author: Inigo Goiri 
Authored: Tue Oct 31 10:21:42 2017 -0700
Committer: Inigo Goiri 
Committed: Tue Oct 31 10:21:42 2017 -0700

--
 .../hdfs/server/federation/store/records/TestMountTable.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/982bd2a5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
index b6f91cf..739d2e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/records/TestMountTable.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -113,7 +112,7 @@ public class TestMountTable {
   @Test
   public void testReadOnly() throws IOException {
 
-Map dest = new HashMap<>();
+Map dest = new LinkedHashMap<>();
 dest.put(DST_NS_0, DST_PATH_0);
 dest.put(DST_NS_1, DST_PATH_1);
 MountTable record1 = MountTable.newInstance(SRC, dest);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: YARN-7379. Moving logging APIs over to slf4j in hadoop-yarn-client. Contributed by Yeliang Cang.

2017-11-02 Thread haibochen
YARN-7379. Moving logging APIs over to slf4j in hadoop-yarn-client. Contributed 
by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c02d2ba5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c02d2ba5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c02d2ba5

Branch: refs/heads/YARN-1011
Commit: c02d2ba50db8a355ea03081c3984b2ea0c375a3f
Parents: 785f1b0
Author: Akira Ajisaka 
Authored: Tue Oct 31 17:09:45 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 31 17:09:45 2017 +0900

--
 .../org/apache/hadoop/yarn/client/api/AMRMClient.java |  7 ---
 .../hadoop/yarn/client/api/async/AMRMClientAsync.java |  7 ---
 .../yarn/client/api/async/impl/AMRMClientAsyncImpl.java   |  7 ---
 .../yarn/client/api/async/impl/NMClientAsyncImpl.java |  7 ---
 .../hadoop/yarn/client/api/impl/AMRMClientImpl.java   |  7 ---
 .../client/api/impl/ContainerManagementProtocolProxy.java |  7 ---
 .../apache/hadoop/yarn/client/api/impl/NMClientImpl.java  |  7 ---
 .../hadoop/yarn/client/api/impl/RemoteRequestsTable.java  |  7 ---
 .../yarn/client/api/impl/SharedCacheClientImpl.java   |  8 
 .../hadoop/yarn/client/api/impl/YarnClientImpl.java   |  7 ---
 .../java/org/apache/hadoop/yarn/client/cli/TopCLI.java|  7 ---
 .../java/org/apache/hadoop/yarn/client/TestGetGroups.java |  7 ---
 .../org/apache/hadoop/yarn/client/TestRMFailover.java | 10 +-
 ...ResourceManagerAdministrationProtocolPBClientImpl.java |  8 
 .../yarn/client/api/async/impl/TestAMRMClientAsync.java   |  7 ---
 .../yarn/client/api/impl/AMRMTokenIdentifierForTest.java  |  7 ---
 .../apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java |  7 ---
 .../apache/hadoop/yarn/client/api/impl/TestNMClient.java  |  8 
 .../yarn/client/api/impl/TestSharedCacheClientImpl.java   |  8 
 .../hadoop/yarn/client/api/impl/TestYarnClient.java   | 10 +++---
 20 files changed, 80 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c02d2ba5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index e86bd12..d3d1974 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -23,8 +23,6 @@ import java.util.Collection;
 import java.util.function.Supplier;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -48,12 +46,15 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class AMRMClient extends
 AbstractService {
-  private static final Log LOG = LogFactory.getLog(AMRMClient.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AMRMClient.class);
 
   private TimelineV2Client timelineV2Client;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c02d2ba5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
index 44a36af..793ad79 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
@@ -24,8 +24,6 @@ import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFac

[25/50] [abbrv] hadoop git commit: HDFS-7878. API - expose a unique file identifier.

2017-11-02 Thread haibochen
HDFS-7878. API - expose a unique file identifier.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d015e0bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d015e0bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d015e0bb

Branch: refs/heads/YARN-1011
Commit: d015e0bbd5416943cb4875274e67b7077c00e54b
Parents: 8122543
Author: Chris Douglas 
Authored: Tue Oct 31 09:44:01 2017 -0700
Committer: Chris Douglas 
Committed: Tue Oct 31 09:44:01 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   |  46 
 .../org/apache/hadoop/fs/FilterFileSystem.java  |  12 +
 .../org/apache/hadoop/fs/HarFileSystem.java |  14 ++
 .../main/java/org/apache/hadoop/fs/Options.java | 180 ++
 .../java/org/apache/hadoop/fs/PathHandle.java   |  50 
 .../org/apache/hadoop/fs/RawPathHandle.java | 119 +
 .../src/site/markdown/filesystem/filesystem.md  | 115 +
 .../fs/contract/AbstractContractOpenTest.java   | 247 ++-
 .../hadoop/fs/contract/ContractOptions.java |  10 +
 .../hadoop/fs/contract/ContractTestUtils.java   |  32 +++
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  21 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  54 
 .../hadoop/hdfs/protocol/HdfsConstants.java |   4 +
 .../hadoop/hdfs/protocol/HdfsPathHandle.java|  98 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  17 +-
 .../src/main/proto/hdfs.proto   |   7 +
 .../hdfs/server/namenode/FSDirectory.java   |  10 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java |  16 ++
 .../hdfs/TestFileStatusSerialization.java   |  78 --
 .../src/test/resources/contract/hdfs.xml|  12 +-
 20 files changed, 1102 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d015e0bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index d43e41d..64021ad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.Options.HandleOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -951,6 +952,51 @@ public abstract class FileSystem extends Configured 
implements Closeable {
   }
 
   /**
+   * Open an FSDataInputStream matching the PathHandle instance. The
+   * implementation may encode metadata in PathHandle to address the
+   * resource directly and verify that the resource referenced
+   * satisfies constraints specified at its construction.
+   * @param fd PathHandle object returned by the FS authority.
+   * @param bufferSize the size of the buffer to use
+   * @throws IOException IO failure
+   * @throws UnsupportedOperationException If not overridden by subclass
+   */
+  public FSDataInputStream open(PathHandle fd, int bufferSize)
+  throws IOException {
+throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Create a durable, serializable handle to the referent of the given
+   * entity.
+   * @param stat Referent in the target FileSystem
+   * @param opt If absent, assume {@link HandleOpt#path()}.
+   * @throws IllegalArgumentException If the FileStatus does not belong to
+   * this FileSystem
+   * @throws UnsupportedOperationException If
+   * {@link #createPathHandle(FileStatus, HandleOpt[])}
+   * not overridden by subclass.
+   * @throws UnsupportedOperationException If this FileSystem cannot enforce
+   * the specified constraints.
+   */
+  public final PathHandle getPathHandle(FileStatus stat, HandleOpt... opt) {
+if (null == opt || 0 == opt.length) {
+  return createPathHandle(stat, HandleOpt.path());
+}
+return createPathHandle(stat, opt);
+  }
+
+  /**
+   * Hook to implement support for {@link PathHandle} operations.
+   * @param stat Referent in the target FileSystem
+   * @param opt Constraints that determine the validity of the
+   *{@link PathHandle} reference.
+   */
+  protected PathHandle createPathHandle(FileStatus stat, HandleOpt... opt) {
+throw new UnsupportedOperationException();
+  }
+
+  /**
* Create an F

[44/50] [abbrv] hadoop git commit: YARN-7286. Add support for docker to have no capabilities. Contributed by Eric Badger

2017-11-02 Thread haibochen
YARN-7286. Add support for docker to have no capabilities. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d00b6f7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d00b6f7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d00b6f7c

Branch: refs/heads/YARN-1011
Commit: d00b6f7c1ff2d7569ae9efdc6823ebcfb86ef2d4
Parents: cc3f3ec
Author: Jason Lowe 
Authored: Thu Nov 2 09:37:17 2017 -0500
Committer: Jason Lowe 
Committed: Thu Nov 2 09:37:17 2017 -0500

--
 .../src/main/resources/yarn-default.xml |  3 +-
 .../runtime/DockerLinuxContainerRuntime.java| 30 --
 .../runtime/TestDockerContainerRuntime.java | 43 
 3 files changed, 71 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d00b6f7c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 8487e72..f4b2e61 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1623,7 +1623,8 @@
 This configuration setting determines the capabilities
   assigned to docker containers when they are launched. While these may not
   be case-sensitive from a docker perspective, it is best to keep these
-  uppercase.
+  uppercase. To run without any capabilities, set this value to
+  "none" or "NONE"
 yarn.nodemanager.runtime.linux.docker.capabilities
 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d00b6f7c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 6f7b6fd..a425cf8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -58,6 +58,7 @@ import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -187,6 +188,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   private boolean enableUserReMapping;
   private int userRemappingUidThreshold;
   private int userRemappingGidThreshold;
+  private Set capabilities;
 
   /**
* Return whether the given environment variables indicate that the operation
@@ -285,6 +287,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 userRemappingGidThreshold = conf.getInt(
   YarnConfiguration.NM_DOCKER_USER_REMAPPING_GID_THRESHOLD,
   YarnConfiguration.DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD);
+
+capabilities = getDockerCapabilitiesFromConf();
+  }
+
+  private Set getDockerCapabilitiesFromConf() throws
+  ContainerExecutionException {
+Set caps = new HashSet<>(Arrays.asList(
+conf.getTrimmedStrings(
+YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
+YarnConfiguration.DEFAULT_NM_DOCKER_CONTAINER_CAPABILITIES)));
+if(caps.contains("none") || caps.contains("NONE")) {
+  if(caps.size() > 1) {
+String msg = "Mixing capabilities with the none keyword is" +
+" not supported";
+throw new ContainerExecutionException(msg);
+  }
+  caps = Collections.emptySet();
+}
+
+return caps;
+  }
+
+  public Set getCapabilities() {
+return capabilities;
   }
 
   @Override
@@ -602,10 +628,6 @@ public class Do

[18/50] [abbrv] hadoop git commit: HADOOP-14980. [JDK9] Upgrade maven-javadoc-plugin to 3.0.0-M1. Contributed by liyunzhang.

2017-11-02 Thread haibochen
HADOOP-14980. [JDK9] Upgrade maven-javadoc-plugin to 3.0.0-M1. Contributed by 
liyunzhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b59e9255
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b59e9255
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b59e9255

Branch: refs/heads/YARN-1011
Commit: b59e92551d67e05c34e8b5b90d375c3e106afcd1
Parents: a8083aa
Author: Akira Ajisaka 
Authored: Tue Oct 31 13:49:15 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 31 13:49:15 2017 +0900

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b59e9255/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 01ff290..f0b3c8e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -98,7 +98,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 2.4
 3.0.2
 3.0.0-M1
-2.10.4
+3.0.0-M1
 1.5
 
1.5
 3.0.1


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[49/50] [abbrv] hadoop git commit: YARN-6705 Add separate NM preemption thresholds for cpu and memory (Haibo Chen)

2017-11-02 Thread haibochen
YARN-6705 Add separate NM preemption thresholds for cpu and memory  (Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b80cad7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b80cad7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b80cad7

Branch: refs/heads/YARN-1011
Commit: 4b80cad710f07de215de0c7cf9eef6f1e0641dc2
Parents: b8dba7d
Author: Haibo Chen 
Authored: Wed Jul 12 12:32:13 2017 -0700
Committer: Haibo Chen 
Committed: Thu Nov 2 10:07:36 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 31 +--
 .../src/main/resources/yarn-default.xml | 34 ++--
 .../monitor/ContainersMonitorImpl.java  | 42 +---
 3 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b80cad7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e843ad2..f9c8b69 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1907,10 +1907,33 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
   NM_PREFIX + "overallocation.memory-utilization-threshold";
 
-  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
-  NM_PREFIX + "overallocation.preemption-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0.96f;
+  /**
+   * The CPU utilization threshold, if went beyond for a few times in a row,
+   * OPPORTUNISTIC containers started due to overallocation should start
+   * getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.cpu";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD = 0.99f;
+
+  /**
+   * The number of times that CPU utilization must go over the CPU preemption
+   * threshold consecutively before preemption starts to kick in.
+   */
+  public static final String NM_OVERALLOCATION_PREEMPTION_CPU_COUNT =
+  NM_PREFIX + "overallocation.preemption-threshold-count.cpu";
+  public static final int DEFAULT_NM_OVERALLOCATION_PREEMPTION_CPU_COUNT = 4;
+
+
+  /**
+   * The memory utilization threshold beyond which OPPORTUNISTIC containers
+   * started due to overallocation should start getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.memory";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD = 0.95f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b80cad7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index fdae629..89d4d1f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1663,11 +1663,37 @@
 
   
 When a node is over-allocated to improve utilization by
-  running OPPORTUNISTIC containers, this config captures the utilization
-  beyond which OPPORTUNISTIC containers should start getting preempted.
+  running OPPORTUNISTIC containers, this config captures the CPU
+  utilization beyond which OPPORTUNISTIC containers should start getting
+  preempted. This is used in combination with
+  yarn.nodemanager.overallocation.preemption-threshold-count.cpu, that is,
+  only when the CPU utilization goes over this threshold consecutively for
+  a few times will preemption kick in.
 
-yarn.nodemanager.overallocation.preemption-threshold
-0.96
+yarn.nodemanager.overallocation.preemption-threshold.cpu
+0.99
+  
+
+  
+When a node is over-allocated to improve utilization by
+  running OPPORTUNIST

[48/50] [abbrv] hadoop git commit: YARN-4511. Common scheduler changes to support scheduler-specific oversubscription implementations.

2017-11-02 Thread haibochen
YARN-4511. Common scheduler changes to support scheduler-specific 
oversubscription implementations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4cfabf2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4cfabf2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4cfabf2

Branch: refs/heads/YARN-1011
Commit: a4cfabf28b0ef3fe7944ffe9ec6a209d5c19e7db
Parents: 4b80cad
Author: Haibo Chen 
Authored: Thu Nov 2 09:12:19 2017 -0700
Committer: Haibo Chen 
Committed: Thu Nov 2 10:07:36 2017 -0700

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   6 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   6 +
 .../resourcemanager/ResourceTrackerService.java |   3 +-
 .../monitor/capacity/TempSchedulerNode.java |   2 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   7 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  13 +-
 .../scheduler/AbstractYarnScheduler.java|   4 +-
 .../scheduler/ClusterNodeTracker.java   |   6 +-
 .../scheduler/SchedulerNode.java| 317 +++
 .../scheduler/SchedulerNodeReport.java  |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +-
 .../allocator/RegularContainerAllocator.java|   4 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   2 +-
 .../common/fica/FiCaSchedulerNode.java  |  11 +-
 .../scheduler/fair/FSPreemptionThread.java  |   2 +-
 .../scheduler/fair/FSSchedulerNode.java |   9 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   6 +
 .../TestWorkPreservingRMRestart.java|  26 +-
 ...alCapacityPreemptionPolicyMockFramework.java |   2 +-
 ...alCapacityPreemptionPolicyMockFramework.java |   6 +-
 .../scheduler/TestAbstractYarnScheduler.java|   4 +-
 .../scheduler/TestSchedulerNode.java| 393 +++
 .../capacity/TestCapacityScheduler.java |   2 +-
 .../TestCapacitySchedulerAsyncScheduling.java   |   8 +-
 .../scheduler/capacity/TestLeafQueue.java   |   4 +-
 .../TestNodeLabelContainerAllocation.java   |  14 +-
 .../fair/TestContinuousScheduling.java  |  42 +-
 .../scheduler/fair/TestFSSchedulerNode.java |  18 +-
 .../scheduler/fair/TestFairScheduler.java   |  14 +-
 .../scheduler/fifo/TestFifoScheduler.java   |   4 +-
 30 files changed, 780 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4cfabf2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index e71ddff..b92a3d1 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
+import org.apache.hadoop.yarn.server.api.records.OverAllocationInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode
@@ -206,6 +207,11 @@ public class NodeInfo {
 }
 
 @Override
+public OverAllocationInfo getOverAllocationInfo() {
+  return null;
+}
+
+@Override
 public long getUntrackedTimeStamp() {
   return 0;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4cfabf2/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 6b7ac3c..b95b58a 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
+import org.apache.h

[29/50] [abbrv] hadoop git commit: YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).

2017-11-02 Thread haibochen
YARN-6413. FileSystem based Yarn Registry implementation. (Ellen Hui via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed24da3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed24da3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed24da3d

Branch: refs/heads/YARN-1011
Commit: ed24da3dd73c137b44235e525112056ace6d3843
Parents: 5f681fa
Author: Subru Krishnan 
Authored: Tue Oct 31 12:05:43 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Oct 31 12:05:43 2017 -0700

--
 .../impl/FSRegistryOperationsService.java   | 249 
 .../registry/client/types/ServiceRecord.java|  64 
 .../impl/TestFSRegistryOperationsService.java   | 298 +++
 3 files changed, 611 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed24da3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
new file mode 100644
index 000..cfff1bd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java
@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.registry.client.impl;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidPathnameException;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Filesystem-based implementation of RegistryOperations. This class relies
+ * entirely on the configured FS for security and does no extra checks.
+ */
+public class FSRegistryOperationsService extends CompositeService
+implements RegistryOperations {
+
+  private FileSystem fs;
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FSRegistryOperationsService.class);
+  private final RegistryUtils.ServiceRecordMarshal serviceRecordMarshal =
+  new RegistryUtils.ServiceRecordMarshal();
+
+  public FSRegistryOperationsService() {
+super(FSRegistryOperationsService.class.getName());
+  }
+
+  @VisibleForTesting
+  public FileSystem getFs() {
+return this.fs;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) {
+try {
+  this.fs = FileSystem.get(conf);
+  LOG.info

[23/50] [abbrv] hadoop git commit: HADOOP-14919. BZip2 drops records when reading data in splits. Contributed by Jason Lowe

2017-11-02 Thread haibochen
HADOOP-14919. BZip2 drops records when reading data in splits. Contributed by 
Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fae63aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fae63aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fae63aa

Branch: refs/heads/YARN-1011
Commit: 2fae63aa60c43b62bd908a9499562fe528603185
Parents: c02d2ba
Author: Jason Lowe 
Authored: Tue Oct 31 09:30:13 2017 -0500
Committer: Jason Lowe 
Committed: Tue Oct 31 09:30:13 2017 -0500

--
 .../apache/hadoop/io/compress/BZip2Codec.java   | 39 +-
 .../io/compress/bzip2/CBZip2InputStream.java| 32 +
 .../hadoop/mapred/TestTextInputFormat.java  | 76 
 3 files changed, 98 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fae63aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 331606e..db78118 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -204,43 +204,8 @@ public class BZip2Codec implements Configurable, 
SplittableCompressionCodec {
   Seekable.class.getName());
 }
 
-//find the position of first BZip2 start up marker
-((Seekable)seekableIn).seek(0);
-
-// BZip2 start of block markers are of 6 bytes.  But the very first block
-// also has "BZh9", making it 10 bytes.  This is the common case.  But at
-// time stream might start without a leading BZ.
-final long FIRST_BZIP2_BLOCK_MARKER_POSITION =
-  CBZip2InputStream.numberOfBytesTillNextMarker(seekableIn);
-long adjStart = 0L;
-if (start != 0) {
-  // Other than the first of file, the marker size is 6 bytes.
-  adjStart = Math.max(0L, start - (FIRST_BZIP2_BLOCK_MARKER_POSITION
-  - (HEADER_LEN + SUB_HEADER_LEN)));
-}
-
-((Seekable)seekableIn).seek(adjStart);
-SplitCompressionInputStream in =
-  new BZip2CompressionInputStream(seekableIn, adjStart, end, readMode);
-
-
-// The following if clause handles the following case:
-// Assume the following scenario in BZip2 compressed stream where
-// . represent compressed data.
-// .[48 bit Block].[48 bit   Block].[48 bit Block]...
-// [47 bits][1 bit].[48 bit Block]...
-// ^[Assume a Byte alignment here]
-// ^^[current position of stream]
-// .^^[We go back 10 Bytes in stream and find a Block 
marker]
-// ^^[We align at wrong position!]
-// ...^^[While 
this pos is correct]
-
-if (in.getPos() < start) {
-  ((Seekable)seekableIn).seek(start);
-  in = new BZip2CompressionInputStream(seekableIn, start, end, readMode);
-}
-
-return in;
+((Seekable)seekableIn).seek(start);
+return new BZip2CompressionInputStream(seekableIn, start, end, readMode);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fae63aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
index 1f7632b..bb02cf2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
@@ -52,20 +52,20 @@ import 
org.apache.hadoop.io.compress.SplittableCompressionCodec.READ_MODE;
  * This Ant code was enhanced so that it can de-compress blocks of bzip2 data.
  * Current position in the stream is an important statistic for Hadoop. For
  * example in LineRecordReader, we solely depend on the current position in the
- * stream to know about the progess. The notion of position becomes complicated
+ * stream to know about the progress. The notion of position becomes 
complicated
  * for compressed files. The Hadoop splitting is done in terms of compressed
  * file. Bu

[35/50] [abbrv] hadoop git commit: YARN-7400. Incorrect log preview displayed in jobhistory server ui. Contributed by Xuan Gong.

2017-11-02 Thread haibochen
YARN-7400. Incorrect log preview displayed in jobhistory server ui. Contributed 
by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d57dba99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d57dba99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d57dba99

Branch: refs/heads/YARN-1011
Commit: d57dba99428cbe3f3dfcec834d79f709e7529ef9
Parents: 70f1a94
Author: Junping Du 
Authored: Wed Nov 1 14:48:16 2017 -0700
Committer: Junping Du 
Committed: Wed Nov 1 14:48:16 2017 -0700

--
 .../filecontroller/ifile/IndexedFileAggregatedLogsBlock.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d57dba99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
index 5439b53..db2915a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
@@ -221,8 +221,8 @@ public class IndexedFileAggregatedLogsBlock extends 
LogAggregationHtmlBlock {
   __(" for the full log.").__();
 }
 long totalSkipped = 0;
-while (totalSkipped < start) {
-  long ret = in.skip(start - totalSkipped);
+while (totalSkipped < startIndex) {
+  long ret = in.skip(startIndex - totalSkipped);
   if (ret == 0) {
 //Read one byte
 int nextByte = in.read();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] [abbrv] hadoop git commit: HDFS-206. Support for head in FSShell. Contributed by Gabor Bota.

2017-11-02 Thread haibochen
HDFS-206. Support for head in FSShell. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81225430
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81225430
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81225430

Branch: refs/heads/YARN-1011
Commit: 81225430d0153e8515f2ecb56ece7fcfe289d5ee
Parents: 2fae63a
Author: Sean Mackrory 
Authored: Mon Oct 30 16:35:42 2017 -0600
Committer: Sean Mackrory 
Committed: Tue Oct 31 08:34:01 2017 -0600

--
 .../org/apache/hadoop/fs/shell/FsCommand.java   |  1 +
 .../java/org/apache/hadoop/fs/shell/Head.java   | 78 
 .../src/site/markdown/FileSystemShell.md| 13 
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 27 +++
 4 files changed, 119 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81225430/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
index f274f67..4a13414 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
@@ -64,6 +64,7 @@ abstract public class FsCommand extends Command {
 factory.registerCommands(SetReplication.class);
 factory.registerCommands(Stat.class);
 factory.registerCommands(Tail.class);
+factory.registerCommands(Head.class);
 factory.registerCommands(Test.class);
 factory.registerCommands(Touch.class);
 factory.registerCommands(Truncate.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81225430/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Head.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Head.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Head.java
new file mode 100644
index 000..2280225
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Head.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.PathIsDirectoryException;
+import org.apache.hadoop.io.IOUtils;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Show the first 1KB of the file.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+
+class Head extends FsCommand {
+  public static void registerCommands(CommandFactory factory) {
+factory.addClass(Head.class, "-head");
+  }
+  public static final String NAME = "head";
+  public static final String USAGE = "";
+  public static final String DESCRIPTION =
+  "Show the first 1KB of the file.\n";
+
+  private long endingOffset = 1024;
+
+  @Override
+  protected void processOptions(LinkedList args) throws IOException {
+CommandFormat cf = new CommandFormat(1, 1);
+cf.parse(args);
+  }
+
+  @Override
+  protected List expandArgument(String arg) throws IOException {
+List items = new LinkedList();
+items.add(new PathData(arg, getConf()));
+return items;
+  }
+
+  @Override
+  protected void processPath(PathData item) throws IOException {
+if (item.stat.isDirectory()) {
+  throw new PathIsDirectoryException(item.toString());
+}
+
+dumpToOffset(item);
+  }
+
+  private void dumpToOffset(PathData item) throws IOException {
+FSDataInputStream in = item.fs.open(item.path);
+try {
+  IOUtils.c

[20/50] [abbrv] hadoop git commit: HADOOP-14980. [JDK9] Upgrade maven-javadoc-plugin to 3.0.0-M1. Contributed by ligongyi.

2017-11-02 Thread haibochen
HADOOP-14980. [JDK9] Upgrade maven-javadoc-plugin to 3.0.0-M1. Contributed by 
ligongyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fad22d8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fad22d8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fad22d8a

Branch: refs/heads/YARN-1011
Commit: fad22d8ab47ab6d16de0b0d52ba6a7dd9380f6f7
Parents: 60503f9
Author: Akira Ajisaka 
Authored: Tue Oct 31 13:51:26 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 31 13:51:26 2017 +0900

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fad22d8a/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 01ff290..f0b3c8e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -98,7 +98,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 2.4
 3.0.2
 3.0.0-M1
-2.10.4
+3.0.0-M1
 1.5
 
1.5
 3.0.1


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: Revert "HADOOP-14980. [JDK9] Upgrade maven-javadoc-plugin to 3.0.0-M1. Contributed by liyunzhang."

2017-11-02 Thread haibochen
Revert "HADOOP-14980. [JDK9] Upgrade maven-javadoc-plugin to 3.0.0-M1. 
Contributed by liyunzhang."

This reverts commit b59e92551d67e05c34e8b5b90d375c3e106afcd1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60503f9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60503f9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60503f9d

Branch: refs/heads/YARN-1011
Commit: 60503f9d4f32576b59c20b8305705e70e6295597
Parents: b59e925
Author: Akira Ajisaka 
Authored: Tue Oct 31 13:50:28 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Oct 31 13:50:28 2017 +0900

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60503f9d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index f0b3c8e..01ff290 100644
--- a/pom.xml
+++ b/pom.xml
@@ -98,7 +98,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 2.4
 3.0.2
 3.0.0-M1
-3.0.0-M1
+2.10.4
 1.5
 
1.5
 3.0.1


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.

2017-11-02 Thread haibochen
HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8083aaa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8083aaa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8083aaa

Branch: refs/heads/YARN-1011
Commit: a8083aaa6f4bcc7287f2b65018a946a840123c50
Parents: d64736d
Author: Junping Du 
Authored: Mon Oct 30 15:16:51 2017 -0700
Committer: Junping Du 
Committed: Mon Oct 30 15:16:51 2017 -0700

--
 .../Apache_Hadoop_YARN_Client_2.8.2.xml | 2316 ---
 .../Apache_Hadoop_YARN_Common_2.8.2.xml | 2665 --
 .../Apache_Hadoop_YARN_Server_Common_2.8.2.xml  |  829 --
 .../jdiff/Apache_Hadoop_YARN_Client_2.8.2.xml   | 2316 +++
 .../jdiff/Apache_Hadoop_YARN_Common_2.8.2.xml   | 2665 ++
 .../Apache_Hadoop_YARN_Server_Common_2.8.2.xml  |  829 ++
 6 files changed, 5810 insertions(+), 5810 deletions(-)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8083aaa/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Client_2.8.2.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Client_2.8.2.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Client_2.8.2.xml
deleted file mode 100644
index f95a17f..000
--- 
a/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Client_2.8.2.xml
+++ /dev/null
@@ -1,2316 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-
-  
-  
-  
-  
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
- 

[13/50] [abbrv] hadoop git commit: HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8083aaa/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.2.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.2.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.2.xml
new file mode 100644
index 000..f95a17f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.2.xml
@@ -0,0 +1,2316 @@
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+
+  
+  
+  
+  
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+ 

[14/50] [abbrv] hadoop git commit: HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8083aaa/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
deleted file mode 100644
index 0c37acb..000
--- 
a/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
+++ /dev/null
@@ -1,829 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-
-
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-
-
-  
-  
-
-
-
-
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-  
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-  
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-  
-
-
-
-
-
-  
-  
-
-
-


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[15/50] [abbrv] hadoop git commit: HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8083aaa/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Common_2.8.2.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Common_2.8.2.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Common_2.8.2.xml
deleted file mode 100644
index 8a4db43..000
--- 
a/hadoop-yarn-project/hadoop-yarn/dev-support/Apache_Hadoop_YARN_Common_2.8.2.xml
+++ /dev/null
@@ -1,2665 +0,0 @@
-
-
-
-
-
-
-
-
-
-  
-  
-
-
-
-
-
-
-  
-
-
-
-
-
-
-  
-  
-  
-
-
-  
-
-
-
-
-  
-
-
-
-
-  
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-
-
-
-
-  
-  
-  
-
-
-  
-
-
-
-
-  
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-  
-  
-
-
-
-
-  
-  
-
-
-
-
-  
-  
-
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-  
-  
-  
-  
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-
-
-  
-  
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-  
-  
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-  
-  
-
-
-  
-  
-
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-
-
-  
-  
-
-
-
-
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-
-
-  
-  
-
-
-  
-
-
-
-
-  
-
-
-  
-
-
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-  
-  
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-  
-
-
-
-
-  
-  
-
-
-
-
-
-
-  
-  
-
-
-  
-  
-  
-  
-
-
-
-
-
-
-
-
-
-
-  
-
-
-
-  
-  
-  
-  
-
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-
-
-  
-  
-  
-  
-  
-  
-  
-  
-
-
-
-  
-  
-
-
-
-
-
-
-  
-  
-
-
-
-
-
-  
-  
-
-
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-  
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-
-
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-
-
-
-
-
-
-
-
-  
-  
-  
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-
-
-
-
-  
-  
-
-
-  
-  
-
-
-
-
-
-
-  
-  
-  
-
-
-  
-  
-  
-
-
-
-
-  
-
-
-
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-
-  
-  
-
-  
-  
-  
-  
-
-
-
-  
-  
-  
-
-
-
-
-
-
-
-
-
-
-  
-  
-
-
-  
-  
-
-
-
-
-
-
-
-
-
-
-  
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-  
-  
-
-
-  
-  
-
-  
-  
-
-
-  
-  
-
-
-
-  
-  
-
-
-  
-  
-
-  
-  
-
-
-  
-  
-
-
-
-
-
-  
-  
-  
-  
-
-
-
-  
-  
-
-
-  
-  
-
-  
-  
- 

[09/50] [abbrv] hadoop git commit: YARN-6927. Add support for individual resource types requests in MapReduce (Contributed by Gergo Repas via Daniel Templeton)

2017-11-02 Thread haibochen
YARN-6927. Add support for individual resource types requests in MapReduce
(Contributed by Gergo Repas via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a7e8108
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a7e8108
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a7e8108

Branch: refs/heads/YARN-1011
Commit: 9a7e81083801a57d6bb96584988415cbef67460d
Parents: e4878a5
Author: Daniel Templeton 
Authored: Mon Oct 30 11:04:22 2017 -0700
Committer: Daniel Templeton 
Committed: Mon Oct 30 11:04:22 2017 -0700

--
 .../v2/app/job/impl/TaskAttemptImpl.java| 141 +++-
 .../mapreduce/TestMapreduceConfigFields.java|  11 +
 .../v2/app/job/impl/TestTaskAttempt.java| 353 ++-
 .../apache/hadoop/mapreduce/MRJobConfig.java|  68 +++-
 .../org/apache/hadoop/mapred/YARNRunner.java|  86 -
 .../apache/hadoop/mapred/TestYARNRunner.java| 161 +
 .../yarn/util/resource/ResourceUtils.java   |  44 +++
 7 files changed, 835 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a7e8108/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 00c7b84..90e0d21 100755
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapreduce.v2.app.job.impl;
 
+import static org.apache.commons.lang.StringUtils.isEmpty;
+
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -123,6 +125,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -136,6 +139,8 @@ import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.RackResolver;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -667,12 +672,8 @@ public abstract class TaskAttemptImpl implements
 this.jobFile = jobFile;
 this.partition = partition;
 
-//TODO:create the resource reqt for this Task attempt
 this.resourceCapability = recordFactory.newRecordInstance(Resource.class);
-this.resourceCapability.setMemorySize(
-getMemoryRequired(conf, taskId.getTaskType()));
-this.resourceCapability.setVirtualCores(
-getCpuRequired(conf, taskId.getTaskType()));
+populateResourceCapability(taskId.getTaskType());
 
 this.dataLocalHosts = resolveHosts(dataLocalHosts);
 RackResolver.init(conf);
@@ -689,25 +690,137 @@ public abstract class TaskAttemptImpl implements
 stateMachine = stateMachineFactory.make(this);
   }
 
+  private void populateResourceCapability(TaskType taskType) {
+String resourceTypePrefix =
+getResourceTypePrefix(taskType);
+boolean memorySet = false;
+boolean cpuVcoresSet = false;
+if (resourceTypePrefix != null) {
+  List resourceRequests =
+  ResourceUtils.getRequestedResourcesFromConfig(conf,
+  resourceTypePrefix);
+  for (ResourceInformation resourceRequest : resourceRequests) {
+String resourceName = resourceRequest.getName();
+if (MRJobConfig.RESOURCE_TYPE_NAME_MEMORY.equals(resourceName) ||
+MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY.equals(
+resourceName)) {
+  if (memorySet) {
+throw new IllegalArgumentException(
+"Only one of the following keys " +
+"can be specified for a single job

[11/50] [abbrv] hadoop git commit: HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8083aaa/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
new file mode 100644
index 000..0c37acb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.2.xml
@@ -0,0 +1,829 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+
+
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+
+
+  
+  
+
+
+
+
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+  
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+  
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+  
+
+
+
+
+
+  
+  
+
+
+


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] [abbrv] hadoop git commit: HADOOP-14992. Upgrade Avro patch version. Contributed by Bharat Viswanadham

2017-11-02 Thread haibochen
HADOOP-14992. Upgrade Avro patch version. Contributed by Bharat Viswanadham


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4878a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4878a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4878a59

Branch: refs/heads/YARN-1011
Commit: e4878a59b3a380e323bb4627a69266bbf2524b36
Parents: 9711b78
Author: Chris Douglas 
Authored: Mon Oct 30 09:49:22 2017 -0700
Committer: Chris Douglas 
Committed: Mon Oct 30 09:49:22 2017 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4878a59/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 7490748..e44a50c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -65,7 +65,7 @@
 file:///dev/urandom
 
 
-1.7.4
+1.7.7
 
 
 1.19


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: YARN-7397. Reduce lock contention in FairScheduler#getAppWeight()

2017-11-02 Thread haibochen
YARN-7397. Reduce lock contention in FairScheduler#getAppWeight()


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e62bbbca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e62bbbca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e62bbbca

Branch: refs/heads/YARN-1011
Commit: e62bbbca7adafa0e050212e99c41c95a844700ff
Parents: 9c5c687
Author: Daniel Templeton 
Authored: Sat Oct 28 09:13:13 2017 -0700
Committer: Daniel Templeton 
Committed: Sat Oct 28 09:13:13 2017 -0700

--
 .../scheduler/fair/FairScheduler.java| 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e62bbbca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 0441859..8ea07ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -369,17 +369,20 @@ public class FairScheduler extends
   }
 
   public float getAppWeight(FSAppAttempt app) {
-try {
+double weight = 1.0;
+
+if (sizeBasedWeight) {
   readLock.lock();
-  double weight = 1.0;
-  if (sizeBasedWeight) {
+
+  try {
 // Set weight based on current memory demand
 weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2);
+  } finally {
+readLock.unlock();
   }
-  return (float)weight * app.getPriority().getPriority();
-} finally {
-  readLock.unlock();
 }
+
+return (float)weight * app.getPriority().getPriority();
   }
 
   public Resource getIncrementResourceCapability() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] [abbrv] hadoop git commit: HADOOP-14990. Clean up jdiff xml files added for 2.8.2 release.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8083aaa/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.2.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.2.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.2.xml
new file mode 100644
index 000..8a4db43
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.2.xml
@@ -0,0 +1,2665 @@
+
+
+
+
+
+
+
+
+
+  
+  
+
+
+
+
+
+
+  
+
+
+
+
+
+
+  
+  
+  
+
+
+  
+
+
+
+
+  
+
+
+
+
+  
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+
+
+
+
+  
+  
+  
+
+
+  
+
+
+
+
+  
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+  
+  
+
+
+
+
+  
+  
+
+
+
+
+  
+  
+
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+  
+  
+  
+  
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+
+
+  
+  
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+  
+  
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+  
+  
+
+
+  
+  
+
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+
+
+  
+  
+
+
+
+
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+
+
+  
+  
+
+
+  
+
+
+
+
+  
+
+
+  
+
+
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+  
+  
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+  
+
+
+
+
+  
+  
+
+
+
+
+
+
+  
+  
+
+
+  
+  
+  
+  
+
+
+
+
+
+
+
+
+
+
+  
+
+
+
+  
+  
+  
+  
+
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+
+
+  
+  
+  
+  
+  
+  
+  
+  
+
+
+
+  
+  
+
+
+
+
+
+
+  
+  
+
+
+
+
+
+  
+  
+
+
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+  
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+
+
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+
+
+
+
+
+
+  
+  
+  
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+
+
+
+
+  
+  
+
+
+  
+  
+
+
+
+
+
+
+  
+  
+  
+
+
+  
+  
+  
+
+
+
+
+  
+
+
+
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+
+  
+  
+
+  
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+
+
+
+
+
+
+
+
+  
+  
+
+
+  
+  
+
+
+
+
+
+
+
+
+
+
+  
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+  
+  
+
+
+  
+  
+
+  
+  
+
+
+  
+  
+
+
+
+  
+  
+
+
+  
+  
+
+  
+  
+
+
+  
+  
+
+
+
+
+
+  
+  
+  
+  
+
+
+
+  
+  
+
+
+  
+   

[07/50] [abbrv] hadoop git commit: YARN-7374. Improve performance of DRF comparisons for resource types in fair scheduler

2017-11-02 Thread haibochen
YARN-7374. Improve performance of DRF comparisons for resource types in fair 
scheduler


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9711b789
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9711b789
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9711b789

Branch: refs/heads/YARN-1011
Commit: 9711b78998ca3a1f7734058a78c7baddd130ce0f
Parents: d4811c8
Author: Daniel Templeton 
Authored: Sun Oct 29 17:45:46 2017 -0700
Committer: Daniel Templeton 
Committed: Sun Oct 29 18:54:33 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |   7 +-
 .../yarn/api/records/ResourceInformation.java   |   1 -
 .../DominantResourceFairnessPolicy.java | 238 +--
 .../TestDominantResourceFairnessPolicy.java | 102 ++--
 4 files changed, 306 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9711b789/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 9a5bc79..796b666 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -22,6 +22,7 @@ import java.util.Arrays;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -66,8 +67,10 @@ public abstract class Resource implements 
Comparable {
   // copy array, etc.
   protected static final int NUM_MANDATORY_RESOURCES = 2;
 
-  protected static final int MEMORY_INDEX = 0;
-  protected static final int VCORES_INDEX = 1;
+  @Private
+  public static final int MEMORY_INDEX = 0;
+  @Private
+  public static final int VCORES_INDEX = 1;
 
   @Public
   @Stable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9711b789/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index dad62fb..59908ef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.api.records;
 
 import com.google.common.collect.ImmutableMap;
-import org.apache.curator.shaded.com.google.common.reflect.ClassPath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9711b789/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index e58b357..59635d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.ja

[27/50] [abbrv] hadoop git commit: HDFS-12499. dfs.namenode.shared.edits.dir property is currently namenode specific key. Contributed by Bharat Viswanadham.

2017-11-02 Thread haibochen
HDFS-12499. dfs.namenode.shared.edits.dir property is currently namenode 
specific key. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b922ba73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b922ba73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b922ba73

Branch: refs/heads/YARN-1011
Commit: b922ba7393bd97b98e90f50f01b4cc664c44adb9
Parents: 982bd2a
Author: Arpit Agarwal 
Authored: Tue Oct 31 10:08:05 2017 -0700
Committer: Arpit Agarwal 
Committed: Tue Oct 31 10:23:00 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java| 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java| 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b922ba73/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 32b873b..6125dea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -248,7 +248,6 @@ public class NameNode extends ReconfigurableBase implements
 DFS_NAMENODE_RPC_BIND_HOST_KEY,
 DFS_NAMENODE_NAME_DIR_KEY,
 DFS_NAMENODE_EDITS_DIR_KEY,
-DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
 DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
@@ -278,7 +277,8 @@ public class NameNode extends ReconfigurableBase implements
* for a specific namenode.
*/
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
-DFS_HA_AUTO_FAILOVER_ENABLED_KEY
+  DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
+  DFS_NAMENODE_SHARED_EDITS_DIR_KEY
   };
 
   private String ipcClientRPCBackoffEnable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b922ba73/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 39f76a5..64d2322 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -402,7 +402,7 @@ public class TestDFSUtil {
   public void testSomeConfsNNSpecificSomeNSSpecific() {
 final HdfsConfiguration conf = new HdfsConfiguration();
 
-String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
+String key = DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 conf.set(key, "global-default");
 conf.set(key + ".ns1", "ns1-override");
 conf.set(key + ".ns1.nn1", "nn1-override");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: YARN-7224. Support GPU isolation for docker container. Contributed by Wangda Tan.

2017-11-02 Thread haibochen
YARN-7224. Support GPU isolation for docker container. Contributed by Wangda 
Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9114d7a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9114d7a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9114d7a5

Branch: refs/heads/YARN-1011
Commit: 9114d7a5a0159bbe70e9c289dbe1fc5db9101db5
Parents: e62bbbc
Author: Sunil G 
Authored: Sun Oct 29 11:08:44 2017 +0530
Committer: Sunil G 
Committed: Sun Oct 29 11:08:44 2017 +0530

--
 .../hadoop-yarn/conf/container-executor.cfg |   1 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  29 ++
 .../src/main/resources/yarn-default.xml |  42 ++-
 .../nodemanager/LinuxContainerExecutor.java |   3 +-
 .../resources/gpu/GpuResourceAllocator.java | 102 +++---
 .../resources/gpu/GpuResourceHandlerImpl.java   |  90 --
 .../runtime/DefaultLinuxContainerRuntime.java   |   3 +-
 .../DelegatingLinuxContainerRuntime.java|   9 +-
 .../runtime/DockerLinuxContainerRuntime.java|  91 +-
 .../JavaSandboxLinuxContainerRuntime.java   |   5 +-
 .../linux/runtime/LinuxContainerRuntime.java|   4 +-
 .../linux/runtime/docker/DockerRunCommand.java  |   5 +
 .../runtime/docker/DockerVolumeCommand.java |  49 +++
 .../resourceplugin/DockerCommandPlugin.java |  59 
 .../resourceplugin/ResourcePlugin.java  |  11 +
 .../resourceplugin/gpu/GpuDevice.java   |  78 +
 .../resourceplugin/gpu/GpuDiscoverer.java   |  30 +-
 .../gpu/GpuDockerCommandPluginFactory.java  |  41 +++
 .../gpu/GpuNodeResourceUpdateHandler.java   |  10 +-
 .../resourceplugin/gpu/GpuResourcePlugin.java   |   9 +
 .../gpu/NvidiaDockerV1CommandPlugin.java| 319 +++
 .../recovery/NMLeveldbStateStoreService.java|  62 ++--
 .../recovery/NMNullStateStoreService.java   |   3 +-
 .../recovery/NMStateStoreService.java   |  15 +-
 .../container-executor/impl/utils/docker-util.c | 130 
 .../container-executor/impl/utils/docker-util.h |  18 +-
 .../test/utils/test_docker_util.cc  |  42 +++
 .../TestLinuxContainerExecutorWithMocks.java|   6 +-
 .../TestContainerManagerRecovery.java   |   9 +-
 .../resources/gpu/TestGpuResourceHandler.java   | 156 ++---
 .../TestDelegatingLinuxContainerRuntime.java|  14 +-
 .../runtime/TestDockerContainerRuntime.java | 204 ++--
 .../TestJavaSandboxLinuxContainerRuntime.java   |   3 +-
 .../docker/TestDockerCommandExecutor.java   |   3 +-
 .../runtime/docker/TestDockerVolumeCommand.java |  45 +++
 .../resourceplugin/gpu/TestGpuDiscoverer.java   |  34 +-
 .../gpu/TestNvidiaDockerV1CommandPlugin.java| 217 +
 .../recovery/NMMemoryStateStoreService.java |   8 +-
 .../TestNMLeveldbStateStoreService.java |  22 +-
 39 files changed, 1721 insertions(+), 260 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9114d7a5/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg 
b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index 023654b..7a84d76 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -14,3 +14,4 @@ feature.tc.enabled=0
#  docker.allowed.ro-mounts=## comma separated volumes that can be mounted as 
read-only
#  docker.allowed.rw-mounts=## comma separated volumes that can be mounted as 
read-write, add the yarn local and log dirs to this list to run Hadoop jobs
 #  docker.privileged-containers.enabled=0
+#  docker.allowed.volume-drivers=## comma separated list of allowed 
volume-drivers

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9114d7a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ca2fb66..640e86e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1483,6 +1483,35 @@ public class YarnConfiguration extends Configuration {
   @Private
   public static final String DEFAULT_NM_GPU_PATH_TO_EXEC = "";
 
+  /**
+   * Settings to control which implementation of docker plugin for GPU will be

[04/50] [abbrv] hadoop git commit: YARN-7224. Support GPU isolation for docker container. Contributed by Wangda Tan.

2017-11-02 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9114d7a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index ca6d018..2d522a9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -35,6 +35,7 @@ import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.Localize
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 
 // The state store to use when state isn't being stored
 public class NMNullStateStoreService extends NMStateStoreService {
@@ -268,7 +269,7 @@ public class NMNullStateStoreService extends 
NMStateStoreService {
   }
 
   @Override
-  public void storeAssignedResources(ContainerId containerId,
+  public void storeAssignedResources(Container container,
   String resourceType, List assignedResources)
   throws IOException {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9114d7a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
index 5e2b8a5..598ea9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
@@ -44,6 +44,7 @@ import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.Localize
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
 
 @Private
@@ -731,12 +732,12 @@ public abstract class NMStateStoreService extends 
AbstractService {
   /**
* Store the assigned resources to a container.
*
-   * @param containerId Container Id
+   * @param container NMContainer
* @param resourceType Resource Type
* @param assignedResources Assigned resources
* @throws IOException if fails
*/
-  public abstract void storeAssignedResources(ContainerId containerId,
+  public abstract void storeAssignedResources(Container container,
   String resourceType, List assignedResources)
   throws IOException;
 
@@ -745,4 +746,14 @@ public abstract class NMStateStoreService extends 
AbstractService {
   protected abstract void startStorage() throws IOException;
 
   protected abstract void closeStorage() throws IOException;
+
+  protected void updateContainerResourceMapping(Container container,
+  String resourceType, List assignedResources) {
+// Update Container#getResourceMapping.
+ResourceMappings.AssignedResources newAssigned =
+new ResourceMappings.AssignedResources();
+newAssigned.updateAssignedResources(assignedResources);
+container.getResourceMappings().addAssignedResources(resourceType,
+newAssigned);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9114d7a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c

[10/50] [abbrv] hadoop git commit: YARN-7336. Unsafe cast from long to int Resource.hashCode() method (Contributed by Miklos Szegedi via Daniel Templeton)

2017-11-02 Thread haibochen
YARN-7336. Unsafe cast from long to int Resource.hashCode() method
(Contributed by Miklos Szegedi via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d64736d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d64736d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d64736d5

Branch: refs/heads/YARN-1011
Commit: d64736d58965722b71d6eade578b6c4c266e6448
Parents: 9a7e810
Author: Daniel Templeton 
Authored: Mon Oct 30 12:40:29 2017 -0700
Committer: Daniel Templeton 
Committed: Mon Oct 30 12:41:28 2017 -0700

--
 .../main/java/org/apache/hadoop/yarn/api/records/Resource.java  | 4 ++--
 .../hadoop/yarn/api/records/impl/LightWeightResource.java   | 5 +
 2 files changed, 3 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d64736d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 796b666..6bdde18 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -463,11 +463,11 @@ public abstract class Resource implements 
Comparable {
   @Override
   public int hashCode() {
 final int prime = 47;
-long result = 0;
+int result = 0;
 for (ResourceInformation entry : resources) {
   result = prime * result + entry.hashCode();
 }
-return (int) result;
+return result;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d64736d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index a64d242..7b07bbd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -155,9 +155,6 @@ public class LightWeightResource extends Resource {
   @Override
   public int hashCode() {
 final int prime = 47;
-long result = prime + getMemorySize();
-result = prime * result + getVirtualCores();
-
-return (int) result;
+return prime * (prime + Long.hashCode(getMemorySize())) + 
getVirtualCores();
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/50] [abbrv] hadoop git commit: YARN-7178. Add documentation for Container Update API. (asuresh) [Forced Update!]

2017-11-02 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 0c8fd8973 -> a4cfabf28 (forced update)


YARN-7178. Add documentation for Container Update API. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24f8c5cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24f8c5cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24f8c5cc

Branch: refs/heads/YARN-1011
Commit: 24f8c5cce3adb88e5af84b3e48c4c447ac91e6d3
Parents: 139cc75
Author: Arun Suresh 
Authored: Fri Oct 27 15:27:20 2017 -0700
Committer: Arun Suresh 
Committed: Fri Oct 27 22:56:41 2017 -0700

--
 .../src/site/markdown/CapacityScheduler.md  | 52 
 1 file changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24f8c5cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 17a6ab9..4754c94 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -322,3 +322,55 @@ Changing queue/scheduler properties and adding/removing 
queues can be done in tw
   **Note:** When enabling scheduler configuration mutations via 
`yarn.scheduler.configuration.store.class`, *yarn rmadmin -refreshQueues* will 
be disabled, i.e. it will no longer be possible to update configuration via 
file.
 
   See the [YARN Resource Manager REST 
API](ResourceManagerRest.html#Scheduler_Configuration_Mutation_API) for 
examples on how to change scheduler configuration via REST, and [YARN Commands 
Reference](YarnCommands.html#schedulerconf) for examples on how to change 
scheduler configuration via command line.
+
+Updating a Container (Experimental - API may change in the future)
+
+
+  Once an Application Master has received a Container from the Resource 
Manager, it may request the Resource Manager to update certain attributes of 
the container.
+
+  Currently only two types of container updates are supported:
+
+  * **Resource Update** : Where the AM can request the RM to update the 
resource size of the container. For eg: Change the container from a 2GB, 2 
vcore container to a 4GB, 2 vcore container.
+  * **ExecutionType Update** : Where the AM can request the RM to update the 
ExecutionType of the container. For eg: Change the execution type from 
*GUARANTEED* to *OPPORTUNISTIC* or vice versa.
+  
+  This is facilitated by the AM populating the **updated_containers** field, 
which is a list of type **UpdateContainerRequestProto**, in 
**AllocateRequestProto.** The AM can make multiple container update requests in 
the same allocate call.
+  
+  The schema of the **UpdateContainerRequestProto** is as follows:
+  
+message UpdateContainerRequestProto {
+  required int32 container_version = 1;
+  required ContainerIdProto container_id = 2;
+  required ContainerUpdateTypeProto update_type = 3;
+  optional ResourceProto capability = 4;
+  optional ExecutionTypeProto execution_type = 5;
+}
+
+  The **ContainerUpdateTypeProto** is an enum:
+  
+enum ContainerUpdateTypeProto {
+  INCREASE_RESOURCE = 0;
+  DECREASE_RESOURCE = 1;
+  PROMOTE_EXECUTION_TYPE = 2;
+  DEMOTE_EXECUTION_TYPE = 3;
+}
+
+  As constrained by the above enum, the scheduler currently supports changing 
either the resource update OR executionType of a container in one update 
request.
+  
+  The AM must also provide the latest **ContainerProto** it received from the 
RM. This is the container which the RM will attempt to update.
+
+  If the RM is able to update the requested container, the updated container 
will be returned, in the **updated_containers** list field of type 
**UpdatedContainerProto** in the **AllocateResponseProto** return value of 
either the same allocate call or in one of the subsequent calls.
+  
+  The schema of the **UpdatedContainerProto** is as follows:
+  
+message UpdatedContainerProto {
+  required ContainerUpdateTypeProto update_type = 1;
+  required ContainerProto container = 2;
+}
+  
+  It specifies the type of container update that was performed on the 
Container and the updated Container object which contains an updated token.
+
+  The container token can then be used by the AM to ask the corresponding NM 
to either start the container, if the container has not already been started or 
update the container using the updated token.
+  
+  The **DECREASE_RESOURCE** and **DEMOTE_EXECUTIO

[06/50] [abbrv] hadoop git commit: YARN-6747. TestFSAppStarvation.testPreemptionEnable fails intermittently. (Contributed by Miklos Szegedi)

2017-11-02 Thread haibochen
YARN-6747. TestFSAppStarvation.testPreemptionEnable fails intermittently. 
(Contributed by Miklos Szegedi)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4811c8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4811c8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4811c8c

Branch: refs/heads/YARN-1011
Commit: d4811c8cfae02f42a7aae1f775e87b6726caa3c9
Parents: 9114d7a
Author: Yufei Gu 
Authored: Sun Oct 29 16:44:16 2017 -0700
Committer: Yufei Gu 
Committed: Sun Oct 29 16:44:16 2017 -0700

--
 .../scheduler/fair/TestFSAppStarvation.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4811c8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
index 0712b4c..9665f9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
@@ -62,6 +62,9 @@ public class TestFSAppStarvation extends 
FairSchedulerTestBase {
 ALLOC_FILE.getAbsolutePath());
 conf.setBoolean(FairSchedulerConfiguration.PREEMPTION, true);
 conf.setFloat(FairSchedulerConfiguration.PREEMPTION_THRESHOLD, 0f);
+// This effectively disables the update thread since we call update
+// explicitly on the main thread
+conf.setLong(FairSchedulerConfiguration.UPDATE_INTERVAL_MS, 
Long.MAX_VALUE);
   }
 
   @After
@@ -124,16 +127,17 @@ public class TestFSAppStarvation extends 
FairSchedulerTestBase {
 
 // Wait for apps to be processed by MockPreemptionThread
 for (int i = 0; i < 6000; ++i) {
-  if(preemptionThread.totalAppsAdded() >
-  preemptionThread.uniqueAppsAdded()) {
+  if(preemptionThread.totalAppsAdded() >=
+  preemptionThread.uniqueAppsAdded() * 2) {
 break;
   }
   Thread.sleep(10);
 }
 
-assertTrue("Each app is marked as starved exactly once",
-preemptionThread.totalAppsAdded() >
-preemptionThread.uniqueAppsAdded());
+assertEquals("Each app should be marked as starved once" +
+" at each scheduler update above",
+preemptionThread.totalAppsAdded(),
+preemptionThread.uniqueAppsAdded() * 2);
   }
 
   /*


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/50] [abbrv] hadoop git commit: YARN-7299. Fix TestDistributedScheduler. (asuresh)

2017-11-02 Thread haibochen
YARN-7299. Fix TestDistributedScheduler. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c5c6874
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c5c6874
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c5c6874

Branch: refs/heads/YARN-1011
Commit: 9c5c68745ed18883ce8bdbdca379025b23f17f60
Parents: 24f8c5c
Author: Arun Suresh 
Authored: Fri Oct 27 22:48:29 2017 -0700
Committer: Arun Suresh 
Committed: Fri Oct 27 23:08:18 2017 -0700

--
 .../scheduler/TestDistributedScheduler.java | 52 +---
 1 file changed, 34 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c5c6874/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
index 736dc31..dee2a20 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/TestDistributedScheduler.java
@@ -76,7 +76,9 @@ public class TestDistributedScheduler {
 
 registerAM(distributedScheduler, finalReqIntcptr, Arrays.asList(
 RemoteNode.newInstance(NodeId.newInstance("a", 1), "http://a:1";),
-RemoteNode.newInstance(NodeId.newInstance("b", 2), "http://b:2";)));
+RemoteNode.newInstance(NodeId.newInstance("b", 2), "http://b:2";),
+RemoteNode.newInstance(NodeId.newInstance("c", 3), "http://c:3";),
+RemoteNode.newInstance(NodeId.newInstance("d", 4), "http://d:4";)));
 
 final AtomicBoolean flipFlag = new AtomicBoolean(true);
 Mockito.when(
@@ -92,10 +94,18 @@ public class TestDistributedScheduler {
   RemoteNode.newInstance(
   NodeId.newInstance("c", 3), "http://c:3";),
   RemoteNode.newInstance(
-  NodeId.newInstance("d", 4), "http://d:4";)));
+  NodeId.newInstance("d", 4), "http://d:4";),
+  RemoteNode.newInstance(
+  NodeId.newInstance("e", 5), "http://e:5";),
+  RemoteNode.newInstance(
+  NodeId.newInstance("f", 6), "http://f:6";)));
 } else {
   return createAllocateResponse(Arrays.asList(
   RemoteNode.newInstance(
+  NodeId.newInstance("f", 6), "http://f:6";),
+  RemoteNode.newInstance(
+  NodeId.newInstance("e", 5), "http://e:5";),
+  RemoteNode.newInstance(
   NodeId.newInstance("d", 4), "http://d:4";),
   RemoteNode.newInstance(
   NodeId.newInstance("c", 3), "http://c:3";)));
@@ -117,34 +127,40 @@ public class TestDistributedScheduler {
 distributedScheduler.allocate(allocateRequest);
 Assert.assertEquals(4, allocateResponse.getAllocatedContainers().size());
 
-// Verify equal distribution on hosts a and b, and none on c or d
+// Verify equal distribution on hosts a, b, c and d, and none on e / f
+// NOTE: No more than 1 container will be allocated on a node in the
+//   top k list per allocate call.
 Map> allocs = mapAllocs(allocateResponse, 4);
-Assert.assertEquals(2, allocs.get(NodeId.newInstance("a", 1)).size());
-Assert.assertEquals(2, allocs.get(NodeId.newInstance("b", 2)).size());
-Assert.assertNull(allocs.get(NodeId.newInstance("c", 3)));
-Assert.assertNull(allocs.get(NodeId.newInstance("d", 4)));
+Assert.assertEquals(1, allocs.get(NodeId.newInstance("a", 1)).size());
+Assert.assertEquals(1, allocs.get(NodeId.newInstance("b", 2)).size());
+Assert.assertEquals(1, allocs.get(NodeId.newInstance("c", 3)).size());
+Assert.assertEquals(1, allocs.get(NodeId.newInstance("d", 4)).size());
+Assert.assertNull(allocs.get(NodeId.newInstance("e", 5)));
+Assert.assertNull(allocs.get(NodeId.newInstance("f", 6)));
 
 // New Allocate request
 allocateRequest = Records.newRecord(AllocateRequest.class);
 opportunisticReq =
-createResourceRequest(ExecutionType.OPPORTUNISTIC, 6, "*")

hadoop git commit: YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via wangda)

2017-11-02 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 cfef479b7 -> 7a658363d


YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via 
wangda)

Change-Id: Ia5336f407147d2985a61b5f9f1e69b5b35f398a4
(cherry picked from commit 752f000e0f521ca7be173d6ca338da4585284fd0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a658363
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a658363
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a658363

Branch: refs/heads/branch-2.9
Commit: 7a658363d91ffc646cccf95d32066b395bd8c770
Parents: cfef479
Author: Wangda Tan 
Authored: Thu Nov 2 09:50:00 2017 -0700
Committer: Wangda Tan 
Committed: Thu Nov 2 09:53:20 2017 -0700

--
 .../src/main/webapp/app/routes/cluster-overview.js  | 5 -
 .../src/main/webapp/app/utils/href-address-utils.js | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a658363/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
index 3c6abd4..d03ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
@@ -28,7 +28,10 @@ export default AbstractRoute.extend({
 {
   state: "RUNNING"
 }),
-  queues: this.store.query('yarn-queue.yarn-queue', {}),
+  queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => {
+let type = model.get('firstObject').get('type');
+return this.store.query("yarn-queue." + type + "-queue", {});
+  }),
 });
   },
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a658363/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
index e36de4b..896d448 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
@@ -22,6 +22,6 @@ export default {
   },
 
   getQueueLink: function(queueName) {
-return '#/yarn-queue/' + queueName;
+return '#/yarn-queue/' + queueName + '/info';
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via wangda)

2017-11-02 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk d00b6f7c1 -> 04c604cf1


YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via 
wangda)

Change-Id: Ia5336f407147d2985a61b5f9f1e69b5b35f398a4
(cherry picked from commit 752f000e0f521ca7be173d6ca338da4585284fd0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04c604cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04c604cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04c604cf

Branch: refs/heads/trunk
Commit: 04c604cf1d4e2c52b01a224e9d4a9888a3d8c740
Parents: d00b6f7
Author: Wangda Tan 
Authored: Thu Nov 2 09:50:00 2017 -0700
Committer: Wangda Tan 
Committed: Thu Nov 2 09:51:28 2017 -0700

--
 .../src/main/webapp/app/routes/cluster-overview.js  | 5 -
 .../src/main/webapp/app/utils/href-address-utils.js | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c604cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
index 3c6abd4..d03ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
@@ -28,7 +28,10 @@ export default AbstractRoute.extend({
 {
   state: "RUNNING"
 }),
-  queues: this.store.query('yarn-queue.yarn-queue', {}),
+  queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => {
+let type = model.get('firstObject').get('type');
+return this.store.query("yarn-queue." + type + "-queue", {});
+  }),
 });
   },
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04c604cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
index e36de4b..896d448 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
@@ -22,6 +22,6 @@ export default {
   },
 
   getQueueLink: function(queueName) {
-return '#/yarn-queue/' + queueName;
+return '#/yarn-queue/' + queueName + '/info';
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via wangda)

2017-11-02 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b7dee1f06 -> fb442e9a7


YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via 
wangda)

Change-Id: Ia5336f407147d2985a61b5f9f1e69b5b35f398a4
(cherry picked from commit 752f000e0f521ca7be173d6ca338da4585284fd0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb442e9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb442e9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb442e9a

Branch: refs/heads/branch-3.0
Commit: fb442e9a7c7ff70b8cae61406d69047ab7347d0f
Parents: b7dee1f
Author: Wangda Tan 
Authored: Thu Nov 2 09:50:00 2017 -0700
Committer: Wangda Tan 
Committed: Thu Nov 2 09:50:57 2017 -0700

--
 .../src/main/webapp/app/routes/cluster-overview.js  | 5 -
 .../src/main/webapp/app/utils/href-address-utils.js | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb442e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
index 3c6abd4..d03ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
@@ -28,7 +28,10 @@ export default AbstractRoute.extend({
 {
   state: "RUNNING"
 }),
-  queues: this.store.query('yarn-queue.yarn-queue', {}),
+  queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => {
+let type = model.get('firstObject').get('type');
+return this.store.query("yarn-queue." + type + "-queue", {});
+  }),
 });
   },
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb442e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
index e36de4b..896d448 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
@@ -22,6 +22,6 @@ export default {
   },
 
   getQueueLink: function(queueName) {
-return '#/yarn-queue/' + queueName;
+return '#/yarn-queue/' + queueName + '/info';
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via wangda)

2017-11-02 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6ad688234 -> 752f000e0


YARN-7364. Queue dash board in new YARN UI has incorrect values. (Sunil G via 
wangda)

Change-Id: Ia5336f407147d2985a61b5f9f1e69b5b35f398a4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/752f000e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/752f000e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/752f000e

Branch: refs/heads/branch-2
Commit: 752f000e0f521ca7be173d6ca338da4585284fd0
Parents: 6ad6882
Author: Wangda Tan 
Authored: Thu Nov 2 09:50:00 2017 -0700
Committer: Wangda Tan 
Committed: Thu Nov 2 09:50:00 2017 -0700

--
 .../src/main/webapp/app/routes/cluster-overview.js  | 5 -
 .../src/main/webapp/app/utils/href-address-utils.js | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/752f000e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
index 3c6abd4..d03ea0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js
@@ -28,7 +28,10 @@ export default AbstractRoute.extend({
 {
   state: "RUNNING"
 }),
-  queues: this.store.query('yarn-queue.yarn-queue', {}),
+  queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => {
+let type = model.get('firstObject').get('type');
+return this.store.query("yarn-queue." + type + "-queue", {});
+  }),
 });
   },
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/752f000e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
index e36de4b..896d448 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/href-address-utils.js
@@ -22,6 +22,6 @@ export default {
   },
 
   getQueueLink: function(queueName) {
-return '#/yarn-queue/' + queueName;
+return '#/yarn-queue/' + queueName + '/info';
   }
 };


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: YARN-4511. Common scheduler changes to support scheduler-specific oversubscription implementations.

2017-11-02 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 e5a996e85 -> 0c8fd8973


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c8fd897/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index 740ef33..c56be29 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -563,7 +563,7 @@ public class TestNodeLabelContainerAllocation {
   int numContainers) {
 CapacityScheduler cs = (CapacityScheduler) 
rm.getRMContext().getScheduler();
 SchedulerNode node = cs.getSchedulerNode(nodeId);
-Assert.assertEquals(numContainers, node.getNumContainers());
+Assert.assertEquals(numContainers, node.getNumGuaranteedContainers());
   }
 
   /**
@@ -1065,7 +1065,7 @@ public class TestNodeLabelContainerAllocation {
 for (int i = 0; i < 50; i++) {
   cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
   cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
-  if (schedulerNode1.getNumContainers() == 0) {
+  if (schedulerNode1.getNumGuaranteedContainers() == 0) {
 cycleWaited++;
   }
 }
@@ -1131,7 +1131,7 @@ public class TestNodeLabelContainerAllocation {
 CSAMContainerLaunchDiagnosticsConstants.LAST_NODE_PROCESSED_MSG
 + nodeIdStr + " ( Partition : [x]"));
 Assert.assertEquals(0, cs.getSchedulerNode(nm1.getNodeId())
-.getNumContainers());
+.getNumGuaranteedContainers());
 
 rm1.close();
   }
@@ -1215,7 +1215,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(10, schedulerNode1.getNumContainers());
+Assert.assertEquals(10, schedulerNode1.getNumGuaranteedContainers());
 
 // check non-exclusive containers of LeafQueue is correctly updated
 LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
@@ -1943,7 +1943,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(5, schedulerNode1.getNumContainers());
+Assert.assertEquals(5, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2043,7 +2043,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x (non-exclusive)
-Assert.assertEquals(3, schedulerNode1.getNumContainers());
+Assert.assertEquals(3, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2074,7 +2074,7 @@ public class TestNodeLabelContainerAllocation {
 cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
 // app1 gets all resource in default partition
-Assert.assertEquals(2, schedulerNode2.getNumContainers());
+Assert.assertEquals(2, schedulerNode2.getNumGuaranteedContainers());
 
 // 3GB is used from label x quota. 2GB used from default label.
 // So total 2.5 GB is remaining.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c8fd897/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 854a65c..d432991 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ 
b/hadoop-yarn-project/hadoop-ya

[2/2] hadoop git commit: YARN-4511. Common scheduler changes to support scheduler-specific oversubscription implementations.

2017-11-02 Thread haibochen
YARN-4511. Common scheduler changes to support scheduler-specific 
oversubscription implementations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c8fd897
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c8fd897
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c8fd897

Branch: refs/heads/YARN-1011
Commit: 0c8fd897388d9c641d779e3089d396620a4b7523
Parents: e5a996e
Author: Haibo Chen 
Authored: Thu Nov 2 09:12:19 2017 -0700
Committer: Haibo Chen 
Committed: Thu Nov 2 09:17:56 2017 -0700

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   6 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   6 +
 .../resourcemanager/ResourceTrackerService.java |   3 +-
 .../monitor/capacity/TempSchedulerNode.java |   2 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   7 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  13 +-
 .../scheduler/AbstractYarnScheduler.java|   4 +-
 .../scheduler/ClusterNodeTracker.java   |   6 +-
 .../scheduler/SchedulerNode.java| 317 +++
 .../scheduler/SchedulerNodeReport.java  |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +-
 .../allocator/RegularContainerAllocator.java|   4 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   2 +-
 .../common/fica/FiCaSchedulerNode.java  |  11 +-
 .../scheduler/fair/FSPreemptionThread.java  |   2 +-
 .../scheduler/fair/FSSchedulerNode.java |   9 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   6 +
 .../TestWorkPreservingRMRestart.java|  26 +-
 ...alCapacityPreemptionPolicyMockFramework.java |   2 +-
 ...alCapacityPreemptionPolicyMockFramework.java |   6 +-
 .../scheduler/TestAbstractYarnScheduler.java|   4 +-
 .../scheduler/TestSchedulerNode.java| 393 +++
 .../capacity/TestCapacityScheduler.java |   2 +-
 .../TestCapacitySchedulerAsyncScheduling.java   |   8 +-
 .../scheduler/capacity/TestLeafQueue.java   |   4 +-
 .../TestNodeLabelContainerAllocation.java   |  14 +-
 .../fair/TestContinuousScheduling.java  |  42 +-
 .../scheduler/fair/TestFSSchedulerNode.java |  18 +-
 .../scheduler/fair/TestFairScheduler.java   |  14 +-
 .../scheduler/fifo/TestFifoScheduler.java   |   4 +-
 30 files changed, 780 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c8fd897/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index e71ddff..b92a3d1 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
+import org.apache.hadoop.yarn.server.api.records.OverAllocationInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode
@@ -206,6 +207,11 @@ public class NodeInfo {
 }
 
 @Override
+public OverAllocationInfo getOverAllocationInfo() {
+  return null;
+}
+
+@Override
 public long getUntrackedTimeStamp() {
   return 0;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c8fd897/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 6b7ac3c..b95b58a 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
+import org.apache.h

hadoop git commit: YARN-7286. Add support for docker to have no capabilities. Contributed by Eric Badger

2017-11-02 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 da6ea5c14 -> cfef479b7


YARN-7286. Add support for docker to have no capabilities. Contributed by Eric 
Badger

(cherry picked from commit b7dee1f0608006e776624a9e4de39811d8aebc97)
(cherry picked from commit 6ad6882343f73c285a00753dfef81ab68c7333ef)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfef479b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfef479b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfef479b

Branch: refs/heads/branch-2.9
Commit: cfef479b73bc49d2c5906e990baf7f9afd53d39a
Parents: da6ea5c
Author: Jason Lowe 
Authored: Thu Nov 2 09:50:55 2017 -0500
Committer: Jason Lowe 
Committed: Thu Nov 2 11:02:37 2017 -0500

--
 .../src/main/resources/yarn-default.xml |  3 +-
 .../runtime/DockerLinuxContainerRuntime.java| 30 --
 .../runtime/TestDockerContainerRuntime.java | 43 
 3 files changed, 71 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfef479b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 937b7b0..46fb7c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1618,7 +1618,8 @@
 This configuration setting determines the capabilities
   assigned to docker containers when they are launched. While these may not
   be case-sensitive from a docker perspective, it is best to keep these
-  uppercase.
  uppercase. To run without any capabilities, set this value to
+  "none" or "NONE"
 yarn.nodemanager.runtime.linux.docker.capabilities
 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfef479b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 2013306..5e3e15c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -54,6 +54,7 @@ import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -182,6 +183,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   private boolean enableUserReMapping;
   private int userRemappingUidThreshold;
   private int userRemappingGidThreshold;
+  private Set capabilities;
 
   /**
* Return whether the given environment variables indicate that the operation
@@ -279,6 +281,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 userRemappingGidThreshold = conf.getInt(
   YarnConfiguration.NM_DOCKER_USER_REMAPPING_GID_THRESHOLD,
   YarnConfiguration.DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD);
+
+capabilities = getDockerCapabilitiesFromConf();
+  }
+
+  private Set getDockerCapabilitiesFromConf() throws
+  ContainerExecutionException {
+Set caps = new HashSet<>(Arrays.asList(
+conf.getTrimmedStrings(
+YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
+YarnConfiguration.DEFAULT_NM_DOCKER_CONTAINER_CAPABILITIES)));
+if(caps.contains("none") || caps.contains("NONE")) {
+  if(caps.size() > 1) {
+String msg = "Mixing capabilities with the none keyword is" +
+" not supported";
+throw new Conta

hadoop git commit: YARN-7286. Add support for docker to have no capabilities. Contributed by Eric Badger

2017-11-02 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 82fc80a98 -> 6ad688234


YARN-7286. Add support for docker to have no capabilities. Contributed by Eric 
Badger

(cherry picked from commit b7dee1f0608006e776624a9e4de39811d8aebc97)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ad68823
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ad68823
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ad68823

Branch: refs/heads/branch-2
Commit: 6ad6882343f73c285a00753dfef81ab68c7333ef
Parents: 82fc80a
Author: Jason Lowe 
Authored: Thu Nov 2 09:50:55 2017 -0500
Committer: Jason Lowe 
Committed: Thu Nov 2 09:50:55 2017 -0500

--
 .../src/main/resources/yarn-default.xml |  3 +-
 .../runtime/DockerLinuxContainerRuntime.java| 30 --
 .../runtime/TestDockerContainerRuntime.java | 43 
 3 files changed, 71 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ad68823/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 937b7b0..46fb7c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1618,7 +1618,8 @@
 This configuration setting determines the capabilities
   assigned to docker containers when they are launched. While these may not
   be case-sensitive from a docker perspective, it is best to keep these
-  uppercase.
  uppercase. To run without any capabilities, set this value to
+  "none" or "NONE"
 yarn.nodemanager.runtime.linux.docker.capabilities
 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ad68823/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 2013306..5e3e15c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -54,6 +54,7 @@ import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -182,6 +183,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   private boolean enableUserReMapping;
   private int userRemappingUidThreshold;
   private int userRemappingGidThreshold;
+  private Set capabilities;
 
   /**
* Return whether the given environment variables indicate that the operation
@@ -279,6 +281,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 userRemappingGidThreshold = conf.getInt(
   YarnConfiguration.NM_DOCKER_USER_REMAPPING_GID_THRESHOLD,
   YarnConfiguration.DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD);
+
+capabilities = getDockerCapabilitiesFromConf();
+  }
+
+  private Set getDockerCapabilitiesFromConf() throws
+  ContainerExecutionException {
+Set caps = new HashSet<>(Arrays.asList(
+conf.getTrimmedStrings(
+YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
+YarnConfiguration.DEFAULT_NM_DOCKER_CONTAINER_CAPABILITIES)));
+if(caps.contains("none") || caps.contains("NONE")) {
+  if(caps.size() > 1) {
+String msg = "Mixing capabilities with the none keyword is" +
+" not supported";
+throw new ContainerExecutionException(msg);
+  }
+  caps = Collections.emptySet(

hadoop git commit: YARN-7286. Add support for docker to have no capabilities. Contributed by Eric Badger

2017-11-02 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2ba71afcd -> b7dee1f06


YARN-7286. Add support for docker to have no capabilities. Contributed by Eric 
Badger

(cherry picked from commit d00b6f7c1ff2d7569ae9efdc6823ebcfb86ef2d4)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7dee1f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7dee1f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7dee1f0

Branch: refs/heads/branch-3.0
Commit: b7dee1f0608006e776624a9e4de39811d8aebc97
Parents: 2ba71afc
Author: Jason Lowe 
Authored: Thu Nov 2 09:37:17 2017 -0500
Committer: Jason Lowe 
Committed: Thu Nov 2 09:47:26 2017 -0500

--
 .../src/main/resources/yarn-default.xml |  3 +-
 .../runtime/DockerLinuxContainerRuntime.java| 30 --
 .../runtime/TestDockerContainerRuntime.java | 43 
 3 files changed, 71 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7dee1f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 09c782e..9abf600 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1607,7 +1607,8 @@
 This configuration setting determines the capabilities
   assigned to docker containers when they are launched. While these may not
   be case-sensitive from a docker perspective, it is best to keep these
-  uppercase.
  uppercase. To run without any capabilities, set this value to
+  "none" or "NONE"
 yarn.nodemanager.runtime.linux.docker.capabilities
 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7dee1f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 2013306..5e3e15c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -54,6 +54,7 @@ import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -182,6 +183,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   private boolean enableUserReMapping;
   private int userRemappingUidThreshold;
   private int userRemappingGidThreshold;
+  private Set capabilities;
 
   /**
* Return whether the given environment variables indicate that the operation
@@ -279,6 +281,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 userRemappingGidThreshold = conf.getInt(
   YarnConfiguration.NM_DOCKER_USER_REMAPPING_GID_THRESHOLD,
   YarnConfiguration.DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD);
+
+capabilities = getDockerCapabilitiesFromConf();
+  }
+
+  private Set getDockerCapabilitiesFromConf() throws
+  ContainerExecutionException {
+Set caps = new HashSet<>(Arrays.asList(
+conf.getTrimmedStrings(
+YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
+YarnConfiguration.DEFAULT_NM_DOCKER_CONTAINER_CAPABILITIES)));
+if(caps.contains("none") || caps.contains("NONE")) {
+

hadoop git commit: YARN-7286. Add support for docker to have no capabilities. Contributed by Eric Badger

2017-11-02 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk cc3f3eca4 -> d00b6f7c1


YARN-7286. Add support for docker to have no capabilities. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d00b6f7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d00b6f7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d00b6f7c

Branch: refs/heads/trunk
Commit: d00b6f7c1ff2d7569ae9efdc6823ebcfb86ef2d4
Parents: cc3f3ec
Author: Jason Lowe 
Authored: Thu Nov 2 09:37:17 2017 -0500
Committer: Jason Lowe 
Committed: Thu Nov 2 09:37:17 2017 -0500

--
 .../src/main/resources/yarn-default.xml |  3 +-
 .../runtime/DockerLinuxContainerRuntime.java| 30 --
 .../runtime/TestDockerContainerRuntime.java | 43 
 3 files changed, 71 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d00b6f7c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 8487e72..f4b2e61 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1623,7 +1623,8 @@
 This configuration setting determines the capabilities
   assigned to docker containers when they are launched. While these may not
   be case-sensitive from a docker perspective, it is best to keep these
-  uppercase.
  uppercase. To run without any capabilities, set this value to
+  "none" or "NONE"
 yarn.nodemanager.runtime.linux.docker.capabilities
 
CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d00b6f7c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 6f7b6fd..a425cf8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -58,6 +58,7 @@ import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -187,6 +188,7 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   private boolean enableUserReMapping;
   private int userRemappingUidThreshold;
   private int userRemappingGidThreshold;
+  private Set capabilities;
 
   /**
* Return whether the given environment variables indicate that the operation
@@ -285,6 +287,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 userRemappingGidThreshold = conf.getInt(
   YarnConfiguration.NM_DOCKER_USER_REMAPPING_GID_THRESHOLD,
   YarnConfiguration.DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD);
+
+capabilities = getDockerCapabilitiesFromConf();
+  }
+
+  private Set getDockerCapabilitiesFromConf() throws
+  ContainerExecutionException {
+Set caps = new HashSet<>(Arrays.asList(
+conf.getTrimmedStrings(
+YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
+YarnConfiguration.DEFAULT_NM_DOCKER_CONTAINER_CAPABILITIES)));
+if(caps.contains("none") || caps.contains("NONE")) {
+  if(caps.size() > 1) {
+String msg = "Mixing capabilities with the none keyword is" +
+" not supported";
+throw new ContainerExecutionException(msg);
+  }
+  caps = Collections.emptySet();
+}
+
+return caps;
+  }
+
+  public Set getCapabilities() {
+

[2/2] hadoop git commit: Add missing 2.8.2 jdiff for YARN API

2017-11-02 Thread asuresh
Add missing 2.8.2 jdiff for YARN API


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da6ea5c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da6ea5c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da6ea5c1

Branch: refs/heads/branch-2.9
Commit: da6ea5c1441477a64e89bf4df44f641e778963a6
Parents: 05b7b3d
Author: Arun Suresh 
Authored: Thu Nov 2 07:09:54 2017 -0700
Committer: Arun Suresh 
Committed: Thu Nov 2 07:09:54 2017 -0700

--
 .../jdiff/Apache_Hadoop_YARN_API_2.8.2.xml  | 16988 +
 1 file changed, 16988 insertions(+)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang zhenyi.

2017-11-02 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 6005f0a3e -> 05b7b3d12


MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang 
zhenyi.

(cherry picked from commit cc3f3eca409f5e57e540849a80e6448bb4924cc6)
(cherry picked from commit 82fc80a9875fbb440834165d29bb4f19b7f4227e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05b7b3d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05b7b3d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05b7b3d1

Branch: refs/heads/branch-2.9
Commit: 05b7b3d1268f3c59959ccc139b30108363139e5e
Parents: 6005f0a
Author: Akira Ajisaka 
Authored: Thu Nov 2 18:32:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 2 18:34:46 2017 +0900

--
 .../java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05b7b3d1/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 9bf8e47..2335854 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -68,7 +68,7 @@ class DynamicInputChunk {
* Method to write records into a chunk.
* @param key Key from the listing file.
* @param value Corresponding value from the listing file.
-   * @throws IOException Exception onf failure to write to the file.
+   * @throws IOException Exception on failure to write to the file.
*/
   public void write(Text key, CopyListingFileStatus value) throws IOException {
 writer.append(key, value);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang zhenyi.

2017-11-02 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e53fcd52e -> 82fc80a98


MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang 
zhenyi.

(cherry picked from commit cc3f3eca409f5e57e540849a80e6448bb4924cc6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82fc80a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82fc80a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82fc80a9

Branch: refs/heads/branch-2
Commit: 82fc80a9875fbb440834165d29bb4f19b7f4227e
Parents: e53fcd5
Author: Akira Ajisaka 
Authored: Thu Nov 2 18:32:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 2 18:33:25 2017 +0900

--
 .../java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82fc80a9/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 9bf8e47..2335854 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -68,7 +68,7 @@ class DynamicInputChunk {
* Method to write records into a chunk.
* @param key Key from the listing file.
* @param value Corresponding value from the listing file.
-   * @throws IOException Exception onf failure to write to the file.
+   * @throws IOException Exception on failure to write to the file.
*/
   public void write(Text key, CopyListingFileStatus value) throws IOException {
 writer.append(key, value);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang zhenyi.

2017-11-02 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6b31a94b0 -> 2ba71afcd


MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang 
zhenyi.

(cherry picked from commit cc3f3eca409f5e57e540849a80e6448bb4924cc6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ba71afc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ba71afc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ba71afc

Branch: refs/heads/branch-3.0
Commit: 2ba71afcd5c5feb707af3a83ba137da856c62847
Parents: 6b31a94
Author: Akira Ajisaka 
Authored: Thu Nov 2 18:32:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 2 18:32:57 2017 +0900

--
 .../java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ba71afc/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 9bf8e47..2335854 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -68,7 +68,7 @@ class DynamicInputChunk {
* Method to write records into a chunk.
* @param key Key from the listing file.
* @param value Corresponding value from the listing file.
-   * @throws IOException Exception onf failure to write to the file.
+   * @throws IOException Exception on failure to write to the file.
*/
   public void write(Text key, CopyListingFileStatus value) throws IOException {
 writer.append(key, value);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang zhenyi.

2017-11-02 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk cde56b9ce -> cc3f3eca4


MAPREDUCE-6999. Fix typo onf in DynamicInputChunk.java. Contributed by fang 
zhenyi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc3f3eca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc3f3eca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc3f3eca

Branch: refs/heads/trunk
Commit: cc3f3eca409f5e57e540849a80e6448bb4924cc6
Parents: cde56b9
Author: Akira Ajisaka 
Authored: Thu Nov 2 18:32:24 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 2 18:32:24 2017 +0900

--
 .../java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc3f3eca/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 9bf8e47..2335854 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -68,7 +68,7 @@ class DynamicInputChunk {
* Method to write records into a chunk.
* @param key Key from the listing file.
* @param value Corresponding value from the listing file.
-   * @throws IOException Exception onf failure to write to the file.
+   * @throws IOException Exception on failure to write to the file.
*/
   public void write(Text key, CopyListingFileStatus value) throws IOException {
 writer.append(key, value);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14997. Add hadoop-aliyun as dependency of hadoop-cloud-storage. Contributed by Genmao Yu

2017-11-02 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 178751ed8 -> cde56b9ce


HADOOP-14997. Add hadoop-aliyun as dependency of hadoop-cloud-storage. 
Contributed by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cde56b9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cde56b9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cde56b9c

Branch: refs/heads/trunk
Commit: cde56b9cefe1eb2943eef56a6aa7fdfa1b78e909
Parents: 178751e
Author: Sammi Chen 
Authored: Thu Nov 2 14:26:16 2017 +0800
Committer: Sammi Chen 
Committed: Thu Nov 2 17:12:04 2017 +0800

--
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cde56b9c/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 9711e52..73a9d41 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -105,6 +105,11 @@
 
 
   org.apache.hadoop
+  hadoop-aliyun
+  compile
+
+
+  org.apache.hadoop
   hadoop-aws
   compile
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HADOOP-14997. Add hadoop-aliyun as dependency of hadoop-cloud-storage. Contributed by Genmao Yu

2017-11-02 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 12f92e636 -> 6b31a94b0


HADOOP-14997. Add hadoop-aliyun as dependency of hadoop-cloud-storage. 
Contributed by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b31a94b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b31a94b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b31a94b

Branch: refs/heads/branch-3.0
Commit: 6b31a94b01db4e3161070c95ba499591de570045
Parents: 12f92e6
Author: Sammi Chen 
Authored: Thu Nov 2 14:33:22 2017 +0800
Committer: Sammi Chen 
Committed: Thu Nov 2 17:08:53 2017 +0800

--
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b31a94b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
--
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml 
b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 4d69e94..46dadcf 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -105,6 +105,11 @@
 
 
   org.apache.hadoop
+  hadoop-aliyun
+  compile
+
+
+  org.apache.hadoop
   hadoop-aws
   compile
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] Git Push Summary

2017-11-02 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/apache-trunk [deleted] 0b9eb4c86

-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] Git Push Summary

2017-11-02 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/apache-3.0 [deleted] 4b01ae9c6

-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: addendum patch for YARN-7289.

2017-11-02 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 355539717 -> 6005f0a3e


addendum patch for YARN-7289.

(cherry picked from commit 940ffe3f9ce74286a8863e3743faf88a33c817a3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6005f0a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6005f0a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6005f0a3

Branch: refs/heads/branch-2.9
Commit: 6005f0a3eeba2ff01fd9a29a640377e427a73fbb
Parents: 3555397
Author: Rohith Sharma K S 
Authored: Thu Nov 2 13:55:19 2017 +0530
Committer: Rohith Sharma K S 
Committed: Thu Nov 2 14:11:33 2017 +0530

--
 .../scheduler/AbstractYarnScheduler.java|  4 ++--
 .../scheduler/capacity/CapacityScheduler.java   |  2 +-
 .../rmapp/TestApplicationLifetimeMonitor.java   | 24 +++-
 3 files changed, 16 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6005f0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 5b6fdc6..3e41da7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1327,8 +1327,8 @@ public abstract class AbstractYarnScheduler
 
   @Override
   public long checkAndGetApplicationLifetime(String queueName, long lifetime) {
-// -1 indicates, lifetime is not configured.
-return -1;
+// Lifetime is the application lifetime by default.
+return lifetime;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6005f0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 6e7b8a8..d472d35 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2592,7 +2592,7 @@ public class CapacityScheduler extends
   long maximumApplicationLifetime =
   ((LeafQueue) queue).getMaximumApplicationLifetime();
 
-  // check only for maximum, that's enough because default cann't
+  // check only for maximum, that's enough because default can't
   // exceed maximum
   if (maximumApplicationLifetime <= 0) {
 return lifetimeRequestedByApp;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6005f0a3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
index b1da9af..037ed99 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.jav

[2/3] hadoop git commit: MAPREDUCE-6983. Moving logging APIs over to slf4j in hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.

2017-11-02 Thread aajisaka
http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f92e63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index b1cb6dc..46e4f1a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -27,8 +27,6 @@ import org.apache.avro.io.Encoder;
 import org.apache.avro.io.EncoderFactory;
 import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.avro.util.Utf8;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -36,6 +34,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.CounterGroup;
 import org.apache.hadoop.mapreduce.Counters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -55,7 +55,7 @@ public class EventWriter {
   private DatumWriter writer =
 new SpecificDatumWriter(Event.class);
   private Encoder encoder;
-  private static final Log LOG = LogFactory.getLog(EventWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(EventWriter.class);
 
   /**
* avro encoding format supported by EventWriter.
@@ -112,7 +112,7 @@ public class EventWriter {
   out.close();
   out = null;
 } finally {
-  IOUtils.cleanup(LOG, out);
+  IOUtils.cleanupWithLogger(LOG, out);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f92e63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
index 28fcc92..6efb4f7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
@@ -24,8 +24,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -45,6 +43,8 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Default Parser for the JobHistory files. Typical usage is
@@ -56,7 +56,8 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 @InterfaceStability.Unstable
 public class JobHistoryParser implements HistoryEventHandler {
 
-  private static final Log LOG = LogFactory.getLog(JobHistoryParser.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(JobHistoryParser.class);
   
   private final FSDataInputStream in;
   private JobInfo info = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f92e63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
index 325c375..dd1ebdb 10

[1/3] hadoop git commit: MAPREDUCE-6983. Moving logging APIs over to slf4j in hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.

2017-11-02 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 9efe9bd3d -> 12f92e636


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f92e63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
index 2e2dbe1..588500c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.mapreduce.jobhistory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapred.TaskAttemptID;
 import org.apache.hadoop.mapred.TaskID;
 import org.apache.hadoop.mapred.TaskStatus;
@@ -30,6 +28,8 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.skyscreamer.jsonassert.JSONAssert;
 import org.skyscreamer.jsonassert.JSONCompareMode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
@@ -38,8 +38,8 @@ import java.util.TimeZone;
 
 public class TestHistoryViewerPrinter {
 
-  private static final Log LOG = LogFactory.getLog(
-  TestHistoryViewerPrinter.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestHistoryViewerPrinter.class);
 
   @Test
   public void testHumanPrinter() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f92e63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
index 7e72802..4c847fa 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFileInputFormat.java
@@ -28,8 +28,6 @@ import javax.annotation.Nullable;
 
 import org.junit.Assert;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
@@ -48,6 +46,8 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
@@ -57,7 +57,8 @@ import com.google.common.collect.Sets;
 @RunWith(value = Parameterized.class)
 public class TestFileInputFormat {
   
-  private static final Log LOG = LogFactory.getLog(TestFileInputFormat.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestFileInputFormat.class);
   
   private static String testTmpDir = System.getProperty("test.build.data", 
"/tmp");
   private static final Path TEST_ROOT_DIR = new Path(testTmpDir, "TestFIF");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12f92e63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index abbfcb2..f72aa55 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/s

[3/3] hadoop git commit: MAPREDUCE-6983. Moving logging APIs over to slf4j in hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.

2017-11-02 Thread aajisaka
MAPREDUCE-6983. Moving logging APIs over to slf4j in 
hadoop-mapreduce-client-core. Contributed by Jinjiang Ling.

(cherry picked from commit 178751ed8c9d47038acf8616c226f1f52e884feb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12f92e63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12f92e63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12f92e63

Branch: refs/heads/branch-3.0
Commit: 12f92e636efbc1c324da4ec4751dc21dee935062
Parents: 9efe9bd
Author: Akira Ajisaka 
Authored: Thu Nov 2 17:42:52 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Nov 2 17:43:55 2017 +0900

--
 .../org/apache/hadoop/mapred/BackupStore.java |  7 ---
 .../org/apache/hadoop/mapred/CleanupQueue.java|  8 
 .../java/org/apache/hadoop/mapred/Counters.java   |  4 ++--
 .../DeprecatedQueueConfigurationParser.java   |  8 
 .../org/apache/hadoop/mapred/FileInputFormat.java |  8 
 .../apache/hadoop/mapred/FileOutputCommitter.java |  6 +++---
 .../main/java/org/apache/hadoop/mapred/IFile.java |  6 +++---
 .../apache/hadoop/mapred/IFileInputStream.java|  7 ---
 .../java/org/apache/hadoop/mapred/IndexCache.java |  6 +++---
 .../org/apache/hadoop/mapred/JobACLsManager.java  |  6 +++---
 .../java/org/apache/hadoop/mapred/JobConf.java|  6 +++---
 .../org/apache/hadoop/mapred/JobEndNotifier.java  |  8 
 .../java/org/apache/hadoop/mapred/JvmContext.java |  8 
 .../apache/hadoop/mapred/LineRecordReader.java|  8 
 .../java/org/apache/hadoop/mapred/MapTask.java|  7 ---
 .../java/org/apache/hadoop/mapred/Merger.java |  6 +++---
 .../main/java/org/apache/hadoop/mapred/Queue.java | 10 +-
 .../hadoop/mapred/QueueConfigurationParser.java   |  8 
 .../org/apache/hadoop/mapred/QueueManager.java|  6 +++---
 .../java/org/apache/hadoop/mapred/ReduceTask.java |  9 +
 .../org/apache/hadoop/mapred/SortedRanges.java|  8 
 .../main/java/org/apache/hadoop/mapred/Task.java  | 18 +-
 .../java/org/apache/hadoop/mapred/TaskLog.java| 11 +--
 .../java/org/apache/hadoop/mapred/TaskStatus.java |  8 
 .../org/apache/hadoop/mapred/jobcontrol/Job.java  |  6 +++---
 .../mapred/lib/FieldSelectionMapReduce.java   |  7 ---
 .../apache/hadoop/mapred/lib/InputSampler.java|  6 +++---
 .../hadoop/mapred/lib/MultithreadedMapRunner.java |  8 
 .../apache/hadoop/mapred/pipes/Application.java   |  7 ---
 .../hadoop/mapred/pipes/BinaryProtocol.java   |  8 
 .../apache/hadoop/mapred/pipes/PipesReducer.java  |  7 ---
 .../org/apache/hadoop/mapred/pipes/Submitter.java |  6 +++---
 .../java/org/apache/hadoop/mapreduce/Cluster.java |  7 ---
 .../org/apache/hadoop/mapreduce/CryptoUtils.java  |  6 +++---
 .../java/org/apache/hadoop/mapreduce/Job.java |  6 +++---
 .../hadoop/mapreduce/JobResourceUploader.java |  7 ---
 .../hadoop/mapreduce/JobSubmissionFiles.java  |  7 ---
 .../org/apache/hadoop/mapreduce/JobSubmitter.java | 11 +--
 .../mapreduce/counters/AbstractCounters.java  |  7 ---
 .../counters/FileSystemCounterGroup.java  |  7 ---
 .../mapreduce/counters/FrameworkCounterGroup.java |  7 ---
 .../hadoop/mapreduce/jobhistory/EventWriter.java  |  8 
 .../mapreduce/jobhistory/JobHistoryParser.java|  7 ---
 .../mapreduce/lib/db/BigDecimalSplitter.java  |  7 ---
 .../hadoop/mapreduce/lib/db/DBInputFormat.java|  7 ---
 .../hadoop/mapreduce/lib/db/DBOutputFormat.java   |  7 ---
 .../hadoop/mapreduce/lib/db/DBRecordReader.java   |  7 ---
 .../mapreduce/lib/db/DataDrivenDBInputFormat.java |  7 ---
 .../lib/db/DataDrivenDBRecordReader.java  |  7 ---
 .../hadoop/mapreduce/lib/db/DateSplitter.java |  6 +++---
 .../hadoop/mapreduce/lib/db/FloatSplitter.java|  7 ---
 .../mapreduce/lib/db/OracleDBRecordReader.java|  7 ---
 .../lib/db/OracleDataDrivenDBInputFormat.java |  3 ---
 .../hadoop/mapreduce/lib/db/TextSplitter.java |  6 +++---
 .../lib/fieldsel/FieldSelectionMapper.java|  7 ---
 .../lib/fieldsel/FieldSelectionReducer.java   |  7 ---
 .../lib/input/CombineFileInputFormat.java |  7 ---
 .../mapreduce/lib/input/FileInputFormat.java  |  7 ---
 .../lib/input/FixedLengthRecordReader.java|  8 
 .../mapreduce/lib/input/LineRecordReader.java |  7 ---
 .../lib/input/SequenceFileInputFilter.java|  9 +
 .../mapreduce/lib/jobcontrol/ControlledJob.java   |  7 ---
 .../mapreduce/lib/jobcontrol/JobControl.java  |  6 +++---
 .../mapreduce/lib/map/MultithreadedMapper.java|  7 ---
 .../mapreduce/lib/output/FileOutputCommitter.java |  7 ---
 .../lib/output/PartialFileOutputCommitter.java|  8 
 .../mapre

  1   2   >