hadoop git commit: YARN-6923. Metrics for Federation Router. (Giovanni Matteo Fumarola via asuresh)

2017-08-21 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 75abc9a8e -> ae8fb13b3


YARN-6923. Metrics for Federation Router. (Giovanni Matteo Fumarola via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae8fb13b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae8fb13b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae8fb13b

Branch: refs/heads/trunk
Commit: ae8fb13b312b30de50d65b5450b565d50d690e9e
Parents: 75abc9a
Author: Arun Suresh 
Authored: Mon Aug 21 22:50:24 2017 -0700
Committer: Arun Suresh 
Committed: Mon Aug 21 22:50:24 2017 -0700

--
 .../yarn/server/router/RouterMetrics.java   | 203 +++
 .../clientrm/FederationClientInterceptor.java   |  37 ++-
 .../webapp/FederationInterceptorREST.java   | 116 +++--
 .../yarn/server/router/TestRouterMetrics.java   | 248 +++
 .../webapp/TestFederationInterceptorREST.java   |  12 +-
 5 files changed, 593 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae8fb13b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
new file mode 100644
index 000..42361a3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.router;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.*;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+/**
+ * This class is for maintaining the various Router Federation Interceptor
+ * activity statistics and publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+@Metrics(about = "Metrics for Router Federation Interceptor", context = "fedr")
+public final class RouterMetrics {
+
+  private static final MetricsInfo RECORD_INFO =
+  info("RouterMetrics", "Router Federation Interceptor");
+  private static AtomicBoolean isInitialized = new AtomicBoolean(false);
+
+  // Metrics for operation failed
+  @Metric("# of applications failed to be submitted")
+  private MutableGaugeInt numAppsFailedSubmitted;
+  @Metric("# of applications failed to be created")
+  private MutableGaugeInt numAppsFailedCreated;
+  @Metric("# of applications failed to be killed")
+  private MutableGaugeInt numAppsFailedKilled;
+  @Metric("# of application reports failed to be retrieved")
+  private MutableGaugeInt numAppsFailedRetrieved;
+
+  // Aggregate metrics are shared, and don't have to be looked up per call
+  @Metric("Total number of successful Submitted apps and latency(ms)")
+  private MutableRate totalSucceededAppsSubmitted;
+  @Metric("Total number of successful Killed apps and latency(ms)")
+  private MutableRate totalSucceededAppsKilled;
+  @Metric("Total number of successful Created apps and latency(ms)")
+  private MutableRate totalSucceededAppsCreated;
+  @Metric("Total number of successful Retrieved app reports and latency(ms)")
+  private MutableRate totalSucceededAppsRetrieved;
+
+  /**
+   * Provide quantile counters for all latencies.
+   */
+  private MutableQuantiles submitApplicationLatency;
+  private 

hadoop git commit: YARN-5603. Metrics for Federation StateStore. (Ellen Hui via asuresh)

2017-08-21 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk b6bfb2fcb -> 75abc9a8e


YARN-5603. Metrics for Federation StateStore. (Ellen Hui via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75abc9a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75abc9a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75abc9a8

Branch: refs/heads/trunk
Commit: 75abc9a8e2cf1c7d2c574ede720df59421512be3
Parents: b6bfb2f
Author: Arun Suresh 
Authored: Mon Aug 21 22:43:08 2017 -0700
Committer: Arun Suresh 
Committed: Mon Aug 21 22:43:08 2017 -0700

--
 .../store/impl/SQLFederationStateStore.java |  79 
 .../FederationStateStoreClientMetrics.java  | 184 +++
 .../federation/store/metrics/package-info.java  |  17 ++
 .../TestFederationStateStoreClientMetrics.java  | 146 +++
 4 files changed, 426 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75abc9a8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
index 63d8e42..533f9c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException;
+import 
org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
@@ -72,6 +73,8 @@ import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationMembership
 import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator;
 import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils;
 import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -137,6 +140,7 @@ public class SQLFederationStateStore implements 
FederationStateStore {
   private String url;
   private int maximumPoolSize;
   private HikariDataSource dataSource = null;
+  private final Clock clock = new MonotonicClock();
 
   @Override
   public void init(Configuration conf) throws YarnException {
@@ -203,7 +207,9 @@ public class SQLFederationStateStore implements 
FederationStateStore {
   cstmt.registerOutParameter(9, java.sql.Types.INTEGER);
 
   // Execute the query
+  long startTime = clock.getTime();
   cstmt.executeUpdate();
+  long stopTime = clock.getTime();
 
   // Check the ROWCOUNT value, if it is equal to 0 it means the call
   // did not add a new subcluster into FederationStateStore
@@ -222,8 +228,11 @@ public class SQLFederationStateStore implements 
FederationStateStore {
 
   LOG.info(
   "Registered the SubCluster " + subClusterId + " into the 
StateStore");
+  FederationStateStoreClientMetrics
+  .succeededStateStoreCall(stopTime - startTime);
 
 } catch (SQLException e) {
+  FederationStateStoreClientMetrics.failedStateStoreCall();
   FederationStateStoreUtils.logAndThrowRetriableException(LOG,
   "Unable to register the SubCluster " + subClusterId
   + " into the StateStore",
@@ -260,7 +269,9 @@ public class SQLFederationStateStore implements 
FederationStateStore {
   cstmt.registerOutParameter(3, java.sql.Types.INTEGER);
 
   // Execute the query
+  long startTime = clock.getTime();
   cstmt.executeUpdate();
+  long stopTime = 

hadoop git commit: MAPREDUCE-6838. [ATSv2 Security] Add timeline delegation token received in allocate response to UGI. Contributed by Varun Saxena

2017-08-21 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 2288f5e6d -> c6ce422a3


MAPREDUCE-6838. [ATSv2 Security] Add timeline delegation token received in 
allocate response to UGI. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6ce422a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6ce422a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6ce422a

Branch: refs/heads/YARN-5355
Commit: c6ce422a3423305ac665f80f039f61294adaa32c
Parents: 2288f5e
Author: Jian He 
Authored: Mon Aug 21 22:08:07 2017 -0700
Committer: Jian He 
Committed: Mon Aug 21 22:08:07 2017 -0700

--
 .../v2/app/rm/RMContainerAllocator.java |  17 +--
 .../v2/app/rm/TestRMContainerAllocator.java | 137 +++
 .../hadoop/yarn/api/records/CollectorInfo.java  |   4 +
 .../api/async/impl/AMRMClientAsyncImpl.java |  13 +-
 .../yarn/client/api/TimelineV2Client.java   |  11 +-
 .../client/api/impl/TimelineV2ClientImpl.java   |  80 ++-
 .../api/impl/TestTimelineClientV2Impl.java  |  56 +++-
 .../timelineservice/NMTimelinePublisher.java|   3 +-
 .../TestTimelineServiceClientIntegration.java   |  13 +-
 .../security/TestTimelineAuthFilterForV2.java   |   3 +-
 10 files changed, 301 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6ce422a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 7e8730d..21a6641 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -848,7 +848,8 @@ public class RMContainerAllocator extends 
RMContainerRequestor
   updateAMRMToken(response.getAMRMToken());
 }
 
-List<ContainerStatus> finishedContainers = 
response.getCompletedContainersStatuses();
+List<ContainerStatus> finishedContainers =
+response.getCompletedContainersStatuses();
 
 // propagate preemption requests
 final PreemptionMessage preemptReq = response.getPreemptionMessage();
@@ -877,19 +878,13 @@ public class RMContainerAllocator extends 
RMContainerRequestor
 
 handleUpdatedNodes(response);
 handleJobPriorityChange(response);
-// handle receiving the timeline collector address for this app
-String collectorAddr = null;
-if (response.getCollectorInfo() != null) {
-  collectorAddr = response.getCollectorInfo().getCollectorAddr();
-}
-
+// Handle receiving the timeline collector address and token for this app.
 MRAppMaster.RunningAppContext appContext =
 (MRAppMaster.RunningAppContext)this.getContext();
-if (collectorAddr != null && !collectorAddr.isEmpty()
-&& appContext.getTimelineV2Client() != null) {
-  
appContext.getTimelineV2Client().setTimelineServiceAddress(collectorAddr);
+if (appContext.getTimelineV2Client() != null) {
+  appContext.getTimelineV2Client().
+  setTimelineCollectorInfo(response.getCollectorInfo());
 }
-
 for (ContainerStatus cont : finishedContainers) {
   processFinishedContainer(cont);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6ce422a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 933bd01..1fc6215 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -18,6 +18,7 @@
 
 package 

hadoop git commit: YARN-6047 Documentation updates for TimelineService v2 (Contributed by Rohith Sharma)

2017-08-21 Thread vrushali
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 d337336cb -> 2288f5e6d


YARN-6047 Documentation updates for TimelineService v2 (Contributed by Rohith 
Sharma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2288f5e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2288f5e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2288f5e6

Branch: refs/heads/YARN-5355
Commit: 2288f5e6d022f50b7ad1179d3a8208e57bf67d39
Parents: d337336
Author: Vrushali C 
Authored: Mon Aug 21 20:29:05 2017 -0700
Committer: Vrushali C 
Committed: Mon Aug 21 20:29:56 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm|   8 +-
 .../src/site/markdown/TimelineServiceV2.md  | 333 ++-
 2 files changed, 324 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2288f5e6/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 62e21b2..bb7bda2 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -55,17 +55,15 @@ documentation.
 YARN Timeline Service v.2
 ---
 
-We are introducing an early preview (alpha 1) of a major revision of YARN
+We are introducing an early preview (alpha 2) of a major revision of YARN
 Timeline Service: v.2. YARN Timeline Service v.2 addresses two major
 challenges: improving scalability and reliability of Timeline Service, and
 enhancing usability by introducing flows and aggregation.
 
-YARN Timeline Service v.2 alpha 1 is provided so that users and developers
+YARN Timeline Service v.2 alpha 2 is provided so that users and developers
 can test it and provide feedback and suggestions for making it a ready
 replacement for Timeline Service v.1.x. It should be used only in a test
-capacity. Most importantly, security is not enabled. Do not set up or use
-Timeline Service v.2 until security is implemented if security is a
-critical requirement.
+capacity.
 
 More details are available in the
 [YARN Timeline Service 
v.2](./hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2288f5e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 7435201..86030e0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -73,12 +73,8 @@ The following diagram illustrates the design at a high level.
 
 ### Current Status and Future Plans
 
-YARN Timeline Service v.2 is currently in alpha ("alpha 1"). It is very much 
work in progress, and
-many things can and will change rapidly. Users must enable Timeline Service 
v.2 only on a test or
-experimental cluster to test the feature.
-
-Most importantly, **security is not enabled**. Do not set up or use Timeline 
Service v.2 until
-security is implemented if security is a requirement.
+YARN Timeline Service v.2 is currently in alpha ("alpha 2"). It is a work in 
progress, and
+many things can and will change rapidly.
 
 A complete end-to-end flow of writes and reads is functional, with Apache 
HBase as the backend.
 You should be able to start generating data. When enabled, all YARN-generic 
events are
@@ -95,16 +91,19 @@ resource manager also has its dedicated in-process 
collector. The reader is curr
 instance. Currently, it is not possible to write to Timeline Service outside 
the context of a YARN
 application (i.e. no off-cluster client).
 
+Starting from alpha2, Timeline Service v.2 supports simple authorization in 
terms of a
+configurable whitelist of users and groups who can read timeline data. Cluster 
admins are
+allowed by default to read timeline data.
+
 When YARN Timeline Service v.2 is disabled, one can expect no functional or 
performance impact
 on any other existing functionality.
 
 The work to make it truly production-ready continues. Some key items include
 
 * More robust storage fault tolerance
-* Security
 * Support for off-cluster clients
-* More complete and integrated web UI
 * Better support for long-running apps
+* Support for ACLs
 * Offline (time-based periodic) aggregation for flows, users, and queues for 
reporting and
 analysis
 * Timeline collectors as separate instances from node 

[08/50] [abbrv] hadoop git commit: HDFS-12162. Update listStatus document to describe the behavior when the argument is a file. Contributed by Ajay Kumar.

2017-08-21 Thread aengineer
HDFS-12162. Update listStatus document to describe the behavior when the 
argument is a file. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d72124a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d72124a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d72124a4

Branch: refs/heads/HDFS-7240
Commit: d72124a44268e21ada036242bfbccafc23c52ed0
Parents: 18f3603
Author: Anu Engineer 
Authored: Mon Aug 14 11:32:49 2017 -0700
Committer: Anu Engineer 
Committed: Mon Aug 14 11:32:49 2017 -0700

--
 .../hadoop/fs/http/server/FSOperations.java |  2 +-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 39 
 2 files changed, 40 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index c008802..4b5918a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -669,7 +669,7 @@ public class FSOperations {
 /**
  * Creates a list-status executor.
  *
- * @param path the directory to retrieve the status of its contents.
+ * @param path the directory/file to retrieve the status of its contents.
  * @param filter glob filter to use.
  *
  * @throws IOException thrown if the filter expression is incorrect.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 7544c80..03834eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -495,6 +495,45 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileSt
 
 See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
 
+### List a File
+
+* Submit a HTTP GET request.
+
+curl -i  "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTSTATUS"
+
+The client receives a response with a [`FileStatuses` JSON 
object](#FileStatuses_JSON_Schema):
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 427
+
+{
+  "FileStatuses":
+  {
+"FileStatus":
+[
+  {
+"accessTime"  : 1320171722771,
+"blockSize"   : 33554432,
+"childrenNum" : 0,
+"fileId"  : 16390,
+"group"   : "supergroup",
+"length"  : 1366,
+"modificationTime": 1501770633062,
+"owner"   : "webuser",
+"pathSuffix"  : "",
+"permission"  : "644",
+"replication" : 1,
+"storagePolicy"   : 0,
+"type": "FILE"
+  }
+]
+  }
+}
+
+See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
+
+
 ### Iteratively List a Directory
 
 * Submit a HTTP GET request.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo Goiri via Subru).

2017-08-21 Thread aengineer
YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo 
Goiri via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de462da0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de462da0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de462da0

Branch: refs/heads/HDFS-7240
Commit: de462da04e167a04b89ecf0f40d464cf39dc6549
Parents: 1455306
Author: Subru Krishnan 
Authored: Wed Aug 16 11:43:24 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Aug 16 11:43:24 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../hadoop-yarn-server-common/pom.xml   |   5 +
 .../impl/ZookeeperFederationStateStore.java | 634 +++
 .../impl/TestZookeeperFederationStateStore.java |  89 +++
 .../TestFederationStateStoreFacadeRetry.java|  20 +-
 .../src/site/markdown/Federation.md |  56 +-
 7 files changed, 785 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acaef8..8515e0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2629,6 +2629,14 @@ public class YarnConfiguration extends Configuration {
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
 
+  public static final String FEDERATION_STATESTORE_ZK_PREFIX =
+  FEDERATION_PREFIX + "zk-state-store.";
+  /** Parent znode path under which ZKRMStateStore will create znodes. */
+  public static final String FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  FEDERATION_STATESTORE_ZK_PREFIX + "parent-path";
+  public static final String DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  "/federationstore";
+
   private static final String FEDERATION_STATESTORE_SQL_PREFIX =
   FEDERATION_PREFIX + "state-store.sql.";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 91a8b0a..c40c2c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -96,6 +96,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
 
+// Federation StateStore ZK implementation configs to be ignored
+configurationPropsToSkipCompare.add(
+YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH);
+
 // Federation StateStore SQL implementation configs to be ignored
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 441a574..e8d3880 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -130,6 +130,11 @@
 
   
 
+<dependency>
+  <groupId>org.apache.curator</groupId>
+  <artifactId>curator-test</artifactId>
+  <scope>test</scope>
+</dependency>
   
 
   


[49/50] [abbrv] hadoop git commit: HDFS-11738. Hedged pread takes more time when block moved from initial locations. Contributed by Vinayakumar B.

2017-08-21 Thread aengineer
HDFS-11738. Hedged pread takes more time when block moved from initial 
locations. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6bfb2fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6bfb2fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6bfb2fc

Branch: refs/heads/HDFS-7240
Commit: b6bfb2fcb2391d51b8de97c01c1290880779132e
Parents: 736ceab
Author: John Zhuge 
Authored: Mon Aug 21 13:44:32 2017 -0700
Committer: John Zhuge 
Committed: Mon Aug 21 13:45:30 2017 -0700

--
 .../hadoop/hdfs/DFSClientFaultInjector.java |   2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 145 +++
 .../java/org/apache/hadoop/hdfs/TestPread.java  |  26 +++-
 3 files changed, 112 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6bfb2fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index 748edcd..b58cf16 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -61,4 +61,6 @@ public class DFSClientFaultInjector {
   public boolean skipRollingRestartWait() {
 return false;
   }
+
+  public void sleepBeforeHedgedGet() {}
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6bfb2fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6bff172..97d3de4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -830,60 +830,85 @@ public class DFSInputStream extends FSInputStream
 
   private DNAddrPair chooseDataNode(LocatedBlock block,
  Collection<DatanodeInfo> ignoredNodes) throws IOException {
+return chooseDataNode(block, ignoredNodes, true);
+  }
+
+  /**
+   * Choose datanode to read from.
+   *
+   * @param block Block to choose datanode addr from
+   * @param ignoredNodes  Ignored nodes inside.
+   * @param refetchIfRequired Whether to refetch if no nodes to chose
+   *  from.
+   * @return Returns chosen DNAddrPair; Can be null if refetchIfRequired is
+   * false.
+   */
+  private DNAddrPair chooseDataNode(LocatedBlock block,
+  Collection<DatanodeInfo> ignoredNodes, boolean refetchIfRequired)
+  throws IOException {
 while (true) {
   DNAddrPair result = getBestNodeDNAddrPair(block, ignoredNodes);
   if (result != null) {
 return result;
+  } else if (refetchIfRequired) {
+block = refetchLocations(block, ignoredNodes);
   } else {
-String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
-deadNodes, ignoredNodes);
-String blockInfo = block.getBlock() + " file=" + src;
-if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
-  String description = "Could not obtain block: " + blockInfo;
-  DFSClient.LOG.warn(description + errMsg
-  + ". Throwing a BlockMissingException");
-  throw new BlockMissingException(src, description,
-  block.getStartOffset());
-}
-
-DatanodeInfo[] nodes = block.getLocations();
-if (nodes == null || nodes.length == 0) {
-  DFSClient.LOG.info("No node available for " + blockInfo);
-}
-DFSClient.LOG.info("Could not obtain " + block.getBlock()
-+ " from any node: " + errMsg
-+ ". Will get new block locations from namenode and retry...");
-try {
-  // Introducing a random factor to the wait time before another retry.
-  // The wait time is dependent on # of failures and a random factor.
-  // At the first time of getting a BlockMissingException, the wait 
time
-  // is a random number between 0..3000 ms. If the first retry
-  // still fails, we will wait 3000 ms grace period before the 2nd 
retry.
-  // Also at the second retry, the waiting 

[12/50] [abbrv] hadoop git commit: YARN-6917. Queue path is recomputed from scratch on every allocation. Contributed by Eric Payne

2017-08-21 Thread aengineer
YARN-6917. Queue path is recomputed from scratch on every allocation. 
Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55587928
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55587928
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55587928

Branch: refs/heads/HDFS-7240
Commit: 5558792894169425bff054364a1ab4c48b347fb9
Parents: 3325ef6
Author: Jason Lowe 
Authored: Mon Aug 14 15:31:34 2017 -0500
Committer: Jason Lowe 
Committed: Mon Aug 14 15:31:34 2017 -0500

--
 .../resourcemanager/scheduler/capacity/AbstractCSQueue.java  | 8 
 .../server/resourcemanager/scheduler/capacity/LeafQueue.java | 5 -
 .../resourcemanager/scheduler/capacity/ParentQueue.java  | 6 --
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 5fbdead..d7c452a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -76,6 +76,7 @@ public abstract class AbstractCSQueue implements CSQueue {
   private static final Log LOG = LogFactory.getLog(AbstractCSQueue.class);  
   volatile CSQueue parent;
   final String queueName;
+  private final String queuePath;
   volatile int numContainers;
   
   final Resource minimumAllocation;
@@ -119,6 +120,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 this.labelManager = cs.getRMContext().getNodeLabelManager();
 this.parent = parent;
 this.queueName = queueName;
+this.queuePath =
+  ((parent == null) ? "" : (parent.getQueuePath() + ".")) + this.queueName;
 this.resourceCalculator = cs.getResourceCalculator();
 this.activitiesManager = cs.getActivitiesManager();
 
@@ -150,6 +153,11 @@ public abstract class AbstractCSQueue implements CSQueue {
 queueCapacities,
 parent == null ? null : parent.getQueueCapacities());
   }
+
+  @Override
+  public String getQueuePath() {
+return queuePath;
+  }
   
   @Override
   public float getCapacity() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 2e502b7..d15431e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -299,11 +299,6 @@ public class LeafQueue extends AbstractCSQueue {
 }
   }
 
-  @Override
-  public String getQueuePath() {
-return getParent().getQueuePath() + "." + getQueueName();
-  }
-
   /**
* Used only by tests.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 

[06/50] [abbrv] hadoop git commit: YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by Yuqi Wang

2017-08-21 Thread aengineer
YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by 
Yuqi Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2f6299f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2f6299f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2f6299f

Branch: refs/heads/HDFS-7240
Commit: e2f6299f6f580d7a03f2377d19ac85f55fd4e73b
Parents: ce797a1
Author: Jian He 
Authored: Mon Aug 14 10:51:04 2017 -0700
Committer: Jian He 
Committed: Mon Aug 14 10:51:30 2017 -0700

--
 .../scheduler/AbstractYarnScheduler.java|  1 +
 .../scheduler/capacity/CapacityScheduler.java   | 13 ++
 .../scheduler/fair/FairScheduler.java   | 15 ++-
 .../scheduler/fifo/FifoScheduler.java   | 15 ++-
 .../scheduler/fair/TestFairScheduler.java   | 46 ++--
 5 files changed, 63 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index d506f4d..79caab0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -323,6 +323,7 @@ public abstract class AbstractYarnScheduler
 
   }
 
+  // TODO: Rename it to getCurrentApplicationAttempt
   public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
 SchedulerApplication app = applications.get(
 applicationAttemptId.getApplicationId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 3286982..e4ca003 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -903,6 +903,19 @@ public class CapacityScheduler extends
   ContainerUpdates updateRequests) {
 FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
 if (application == null) {
+  LOG.error("Calling allocate on removed or non existent application " +
+  applicationAttemptId.getApplicationId());
+  return EMPTY_ALLOCATION;
+}
+
+// The allocate may be the leftover from previous attempt, and it will
+// impact current attempt, such as confuse the request and allocation for
+// current attempt's AM container.
+// Note outside precondition check for the attempt id may be
+// outdated here, so double check it here is necessary.
+if (!application.getApplicationAttemptId().equals(applicationAttemptId)) {
+  LOG.error("Calling allocate on previous or removed " +
+  "or non existent application attempt " + applicationAttemptId);
   return EMPTY_ALLOCATION;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 

[13/50] [abbrv] hadoop git commit: HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure durations. Contributed by Hanisha Koneru.

2017-08-21 Thread aengineer
HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure 
durations. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bef4eca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bef4eca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bef4eca

Branch: refs/heads/HDFS-7240
Commit: 8bef4eca28a3466707cc4ea0de0330449319a5eb
Parents: 5558792
Author: Arpit Agarwal 
Authored: Mon Aug 14 15:53:35 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 14 15:53:35 2017 -0700

--
 .../java/org/apache/hadoop/ipc/ProtobufRpcEngine.java | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bef4eca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..2c0cfe5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -190,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = Time.now();
+startTime = Time.monotonicNow();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -245,7 +245,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = Time.now() - startTime;
+long callTime = Time.monotonicNow() - startTime;
 LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
   }
   
@@ -373,19 +373,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 this.server = currentCallInfo.get().server;
 this.call = Server.getCurCall().get();
 this.methodName = currentCallInfo.get().methodName;
-this.setupTime = Time.now();
+this.setupTime = Time.monotonicNow();
   }
 
   @Override
   public void setResponse(Message message) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 call.setDeferredResponse(RpcWritable.wrap(message));
 server.updateDeferredMetrics(methodName, processingTime);
   }
 
   @Override
   public void error(Throwable t) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 String detailedMetricsName = t.getClass().getSimpleName();
 server.updateDeferredMetrics(detailedMetricsName, processingTime);
 call.setDeferredError(t);
@@ -513,7 +513,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 Message param = request.getValue(prototype);
 
 Message result;
-long startTime = Time.now();
+long startTime = Time.monotonicNow();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
 boolean isDeferred = false;
@@ -537,7 +537,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   throw e;
 } finally {
   currentCallInfo.set(null);
-  int processingTime = (int) (Time.now() - startTime);
+  int processingTime = (int) (Time.monotonicNow() - startTime);
   if (LOG.isDebugEnabled()) {
 String msg =
 "Served: " + methodName + (isDeferred ? ", deferred" : "") +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by Masahiro Tanaka.

2017-08-21 Thread aengineer
YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by 
Masahiro Tanaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/588c190a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/588c190a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/588c190a

Branch: refs/heads/HDFS-7240
Commit: 588c190afd49bdbd5708f7805bf6c68f09fee142
Parents: 75dd866
Author: Akira Ajisaka 
Authored: Wed Aug 16 14:06:22 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 16 14:06:22 2017 +0900

--
 .../server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java   | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/588c190a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index a4607c2..79339c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -99,7 +99,6 @@ public class FairSchedulerQueueInfo {
 steadyFairResources = new ResourceInfo(queue.getSteadyFairShare());
 fairResources = new ResourceInfo(queue.getFairShare());
 minResources = new ResourceInfo(queue.getMinShare());
-maxResources = new ResourceInfo(queue.getMaxShare());
 maxResources = new ResourceInfo(
 Resources.componentwiseMin(queue.getMaxShare(),
 scheduler.getClusterResource()));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/50] [abbrv] hadoop git commit: YARN-6741. Deleting all children of a Parent Queue on refresh throws exception. Contributed by Naganarasimha G R.

2017-08-21 Thread aengineer
YARN-6741. Deleting all children of a Parent Queue on refresh throws exception. 
Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8f74c39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8f74c39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8f74c39

Branch: refs/heads/HDFS-7240
Commit: d8f74c3964fa429a4a53c3651d175792cf00ac81
Parents: 7769e96
Author: bibinchundatt 
Authored: Mon Aug 14 09:39:00 2017 +0530
Committer: bibinchundatt 
Committed: Mon Aug 14 09:39:00 2017 +0530

--
 .../capacity/CapacitySchedulerQueueManager.java |   4 +
 .../scheduler/capacity/ParentQueue.java |  39 +++
 .../capacity/TestCapacityScheduler.java | 114 ++-
 3 files changed, 137 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f74c39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index e33fbb3..1ceb6fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -327,6 +327,10 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<
 + "it is not yet in stopped state. Current State : "
 + oldQueue.getState());
   }
+} else if (oldQueue instanceof ParentQueue
+&& newQueue instanceof LeafQueue) {
+  LOG.info("Converting the parent queue: " + oldQueue.getQueuePath()
+  + " to leaf queue.");
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f74c39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index f6ada4f..e0baa07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -18,6 +18,14 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,7 +42,6 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AccessType;
@@ -45,7 +52,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import 

[36/50] [abbrv] hadoop git commit: HDFS-12072. Provide fairness between EC and non-EC recovery tasks. Contributed by Eddy Xu.

2017-08-21 Thread aengineer
HDFS-12072. Provide fairness between EC and non-EC recovery tasks. Contributed 
by Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2989488
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2989488
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2989488

Branch: refs/heads/HDFS-7240
Commit: b29894889742dda654cd88a7ce72a4e51fccb328
Parents: ab1a8ae
Author: Andrew Wang 
Authored: Thu Aug 17 15:26:11 2017 -0700
Committer: Andrew Wang 
Committed: Thu Aug 17 15:26:11 2017 -0700

--
 .../blockmanagement/DatanodeDescriptor.java |  6 +-
 .../server/blockmanagement/DatanodeManager.java | 45 ++---
 .../blockmanagement/TestDatanodeManager.java| 96 +++-
 3 files changed, 108 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2989488/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 2bd4a20..d35894c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -661,7 +661,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
 return erasurecodeBlocks.size();
   }
 
-  public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
+  int getNumberOfReplicateBlocks() {
+return replicateBlocks.size();
+  }
+
+  List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
 return replicateBlocks.poll(maxTransfers);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2989488/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 78783ca..c75bcea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1663,21 +1663,38 @@ public class DatanodeManager {
 }
 
final List<DatanodeCommand> cmds = new ArrayList<>();
-// check pending replication
-List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
-maxTransfers);
-if (pendingList != null) {
-  cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
-  pendingList));
-  maxTransfers -= pendingList.size();
-}
-// check pending erasure coding tasks
-List<BlockECReconstructionInfo> pendingECList = nodeinfo
-.getErasureCodeCommand(maxTransfers);
-if (pendingECList != null) {
-  cmds.add(new BlockECReconstructionCommand(
-  DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList));
+// Allocate _approximately_ maxTransfers pending tasks to DataNode.
+// NN chooses pending tasks based on the ratio between the lengths of
+// replication and erasure-coded block queues.
+int totalReplicateBlocks = nodeinfo.getNumberOfReplicateBlocks();
+int totalECBlocks = nodeinfo.getNumberOfBlocksToBeErasureCoded();
+int totalBlocks = totalReplicateBlocks + totalECBlocks;
+if (totalBlocks > 0) {
+  int numReplicationTasks = (int) Math.ceil(
+  (double) (totalReplicateBlocks * maxTransfers) / totalBlocks);
+  int numECTasks = (int) Math.ceil(
+  (double) (totalECBlocks * maxTransfers) / totalBlocks);
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Pending replication tasks: " + numReplicationTasks
++ " erasure-coded tasks: " + numECTasks);
+  }
+  // check pending replication tasks
+  List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
+  numReplicationTasks);
+  if (pendingList != null && !pendingList.isEmpty()) {
+cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
+pendingList));
+  }
+  // check pending erasure coding tasks
+  List<BlockECReconstructionInfo> pendingECList = nodeinfo
+  .getErasureCodeCommand(numECTasks);
+  if (pendingECList != null && !pendingECList.isEmpty()) {
+cmds.add(new BlockECReconstructionCommand(
+

[33/50] [abbrv] hadoop git commit: YARN-3254. HealthReport should include disk full information. Contributed by Suma Shivaprasad.

2017-08-21 Thread aengineer
YARN-3254. HealthReport should include disk full information. Contributed by 
Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9a0e233
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9a0e233
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9a0e233

Branch: refs/heads/HDFS-7240
Commit: f9a0e2338150f1bd3ba2c29f76979183fd3ed80c
Parents: 1f04cb4
Author: Sunil G 
Authored: Thu Aug 17 15:07:15 2017 +0530
Committer: Sunil G 
Committed: Thu Aug 17 15:07:15 2017 +0530

--
 .../server/nodemanager/DirectoryCollection.java | 61 +++-
 .../nodemanager/LocalDirsHandlerService.java| 59 +++
 .../nodemanager/TestDirectoryCollection.java| 23 
 3 files changed, 130 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a0e233/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index ae2a4ef..502485f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -38,6 +38,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -99,6 +100,7 @@ public class DirectoryCollection {
  private List<String> localDirs;
  private List<String> errorDirs;
  private List<String> fullDirs;
+  private Map<String, DiskErrorInformation> directoryErrorInfo;
 
   // read/write lock for accessing above directories.
   private final ReadLock readLock;
@@ -192,6 +194,7 @@ public class DirectoryCollection {
 localDirs = new CopyOnWriteArrayList<>(dirs);
 errorDirs = new CopyOnWriteArrayList<>();
 fullDirs = new CopyOnWriteArrayList<>();
+directoryErrorInfo = new ConcurrentHashMap<>();
 
 ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 this.readLock = lock.readLock();
@@ -248,11 +251,25 @@ public class DirectoryCollection {
   /**
* @return the directories that have used all disk space
*/
-
  List<String> getFullDirs() {
 this.readLock.lock();
 try {
-  return fullDirs;
+  return Collections.unmodifiableList(fullDirs);
+} finally {
+  this.readLock.unlock();
+}
+  }
+
+  /**
+   * @return the directories that have errors - may not have appropriate 
permissions
+   * or other disk validation checks might have failed in {@link DiskValidator}
+   *
+   */
+  @InterfaceStability.Evolving
+  List<String> getErroredDirs() {
+this.readLock.lock();
+try {
+  return Collections.unmodifiableList(errorDirs);
 } finally {
   this.readLock.unlock();
 }
@@ -271,6 +288,39 @@ public class DirectoryCollection {
   }
 
   /**
+   *
+   * @param dirName Absolute path of Directory for which error diagnostics are 
needed
+   * @return DiskErrorInformation - disk error diagnostics for the specified 
directory
+   * null - the disk associated with the directory has passed disk 
utilization checks
+   * /error validations in {@link DiskValidator}
+   *
+   */
+  @InterfaceStability.Evolving
+  DiskErrorInformation getDirectoryErrorInfo(String dirName) {
+this.readLock.lock();
+try {
+  return directoryErrorInfo.get(dirName);
+} finally {
+  this.readLock.unlock();
+}
+  }
+
+  /**
+   *
+   * @param dirName Absolute path of Directory for which the disk has been 
marked as unhealthy
+   * @return Check if disk associated with the directory is unhealthy
+   */
+  @InterfaceStability.Evolving
+  boolean isDiskUnHealthy(String dirName) {
+this.readLock.lock();
+try {
+  return directoryErrorInfo.containsKey(dirName);
+} finally {
+  this.readLock.unlock();
+}
+  }
+
+  /**
* Create any non-existent directories and parent directories, updating the
* list of valid 

[09/50] [abbrv] hadoop git commit: YARN-6905 Multiple HBaseTimelineStorage test failures due to missing FastNumberFormat (Contributed by Haibo Chen)

2017-08-21 Thread aengineer
YARN-6905 Multiple HBaseTimelineStorage test failures due to missing 
FastNumberFormat (Contributed by Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608a06cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608a06cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608a06cc

Branch: refs/heads/HDFS-7240
Commit: 608a06cca5d68b3155bd70a94bf29ae0942b9ca0
Parents: d72124a
Author: Vrushali C 
Authored: Mon Aug 14 11:40:27 2017 -0700
Committer: Vrushali C 
Committed: Mon Aug 14 11:41:11 2017 -0700

--
 .../storage/TestHBaseTimelineStorageApps.java   |  4 +-
 .../TestHBaseTimelineStorageEntities.java   | 14 ---
 .../storage/common/AppIdKeyConverter.java   |  3 +-
 .../common/HBaseTimelineStorageUtils.java   | 33 +
 .../TestCustomApplicationIdConversion.java  | 39 
 5 files changed, 86 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608a06cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index b3e5197..3948d23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -69,6 +69,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.Applica
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@@ -493,7 +494,8 @@ public class TestHBaseTimelineStorageApps {
 event.addInfo(expKey, expVal);
 
 final TimelineEntity entity = new ApplicationEntity();
-entity.setId(ApplicationId.newInstance(0, 1).toString());
+entity.setId(HBaseTimelineStorageUtils.convertApplicationIdToString(
+ApplicationId.newInstance(0, 1)));
 entity.addEvent(event);
 
 TimelineEntities entities = new TimelineEntities();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608a06cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 4b4c3e1..e18d0d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -62,6 +62,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefi
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;

[21/50] [abbrv] hadoop git commit: YARN-5146. Support for Fair Scheduler in new YARN UI. Contributed by Abdullah Yousufi.

2017-08-21 Thread aengineer
YARN-5146. Support for Fair Scheduler in new YARN UI. Contributed by Abdullah 
Yousufi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dadb0c22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dadb0c22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dadb0c22

Branch: refs/heads/HDFS-7240
Commit: dadb0c2225adef5cb0126610733c285b51f4f43e
Parents: e3ae3e2
Author: Sunil G 
Authored: Tue Aug 15 21:58:44 2017 +0530
Committer: Sunil G 
Committed: Tue Aug 15 21:58:44 2017 +0530

--
 .../src/main/webapp/app/adapters/yarn-queue.js  |  30 -
 .../app/adapters/yarn-queue/capacity-queue.js   |  23 
 .../app/adapters/yarn-queue/fair-queue.js   |  23 
 .../app/adapters/yarn-queue/fifo-queue.js   |  23 
 .../app/adapters/yarn-queue/yarn-queue.js   |  30 +
 .../main/webapp/app/components/tree-selector.js |  19 ++-
 .../src/main/webapp/app/models/yarn-queue.js|  94 --
 .../app/models/yarn-queue/capacity-queue.js |  95 ++
 .../webapp/app/models/yarn-queue/fair-queue.js  |  79 
 .../webapp/app/models/yarn-queue/fifo-queue.js  |  52 
 .../webapp/app/models/yarn-queue/yarn-queue.js  |  23 
 .../main/webapp/app/routes/cluster-overview.js  |   4 +-
 .../src/main/webapp/app/routes/yarn-queue.js|  26 ++--
 .../src/main/webapp/app/routes/yarn-queues.js   |  12 +-
 .../main/webapp/app/routes/yarn-queues/index.js |  25 
 .../app/routes/yarn-queues/queues-selector.js   |  25 
 .../main/webapp/app/serializers/yarn-queue.js   | 129 ---
 .../serializers/yarn-queue/capacity-queue.js| 128 ++
 .../app/serializers/yarn-queue/fair-queue.js|  92 +
 .../app/serializers/yarn-queue/fifo-queue.js|  59 +
 .../app/serializers/yarn-queue/yarn-queue.js|  47 +++
 .../components/queue-configuration-table.hbs|  54 
 .../templates/components/queue-navigator.hbs|   7 +-
 .../yarn-queue/capacity-queue-conf-table.hbs|  54 
 .../yarn-queue/capacity-queue-info.hbs  |  84 
 .../components/yarn-queue/capacity-queue.hbs|  63 +
 .../yarn-queue/fair-queue-conf-table.hbs|  52 
 .../components/yarn-queue/fair-queue-info.hbs   |  66 ++
 .../components/yarn-queue/fair-queue.hbs|  63 +
 .../yarn-queue/fifo-queue-conf-table.hbs|  56 
 .../components/yarn-queue/fifo-queue-info.hbs   |  47 +++
 .../components/yarn-queue/fifo-queue.hbs|  48 +++
 .../webapp/app/templates/yarn-queue/info.hbs|  73 +--
 .../main/webapp/app/templates/yarn-queues.hbs   |  54 +---
 .../src/main/webapp/app/utils/color-utils.js|   1 -
 35 files changed, 1266 insertions(+), 494 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadb0c22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
deleted file mode 100644
index f2017df..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import AbstractAdapter from './abstract';
-
-export default AbstractAdapter.extend({
-  address: "rmWebAddress",
-  restNameSpace: "cluster",
-  serverName: "RM",
-
-  pathForType(/*modelName*/) {
-return 'scheduler'; // move to some common place, return path by modelname.
-  }
-
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadb0c22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js
--
diff --git 

[15/50] [abbrv] hadoop git commit: YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType update. (Kartheek Muthyala via asuresh)

2017-08-21 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7be1d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index aeba399..a1c247b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -27,6 +27,8 @@ import java.util.List;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
@@ -37,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ConfigurationException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -951,4 +954,97 @@ public class TestContainerSchedulerQueuing extends 
BaseContainerManagerTest {
 map.get(org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED)
 .getContainerId());
   }
+
+  /**
+   * Starts one OPPORTUNISTIC container that takes up the whole node's
+   * resources, and submit one more that will be queued. Now promote the
+   * queued OPPORTUNISTIC container, which should kill the current running
+   * OPPORTUNISTIC container to make room for the promoted request.
+   * @throws Exception
+   */
+  @Test
+  public void testPromotionOfOpportunisticContainers() throws Exception {
+containerManager.start();
+
+ContainerLaunchContext containerLaunchContext =
+recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+List list = new ArrayList<>();
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(2048, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(1024, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+
+StartContainersRequest allRequests =
+StartContainersRequest.newInstance(list);
+containerManager.startContainers(allRequests);
+
+Thread.sleep(5000);
+
+// Ensure first container is running and others are queued.
+List statList = new ArrayList();
+for (int i = 0; i < 3; i++) {
+  statList.add(createContainerId(i));
+}
+GetContainerStatusesRequest statRequest = GetContainerStatusesRequest
+.newInstance(Arrays.asList(createContainerId(0)));
+List containerStatuses = containerManager
+.getContainerStatuses(statRequest).getContainerStatuses();
+for (ContainerStatus status : containerStatuses) {
+  if (status.getContainerId().equals(createContainerId(0))) {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+status.getState());
+  } else {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED,
+status.getState());
+  }
+}
+
+ContainerScheduler containerScheduler =
+containerManager.getContainerScheduler();
+// Ensure two containers are properly queued.
+

[14/50] [abbrv] hadoop git commit: HADOOP-14673. Remove leftover hadoop_xml_escape from functions. Contributed by Ajay Kumar.

2017-08-21 Thread aengineer
HADOOP-14673. Remove leftover hadoop_xml_escape from functions. Contributed by 
Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04465113
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04465113
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04465113

Branch: refs/heads/HDFS-7240
Commit: 044651139800b9e2e5b8f224772e6dbd6ded58c6
Parents: 8bef4ec
Author: Arpit Agarwal 
Authored: Mon Aug 14 16:22:10 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 14 16:22:10 2017 -0700

--
 .../src/main/bin/hadoop-functions.sh| 23 --
 .../src/test/scripts/hadoop_escape_chars.bats   | 32 
 2 files changed, 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04465113/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 3cf21cf..9ea4587 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2578,29 +2578,6 @@ function hadoop_parse_args
   hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
 }
 
-## @description  XML-escapes the characters (&'"<>) in the given parameter.
-## @audience private
-## @stabilityevolving
-## @replaceable  yes
-## @paramstring
-## @return   XML-escaped string
-function hadoop_xml_escape
-{
-  sed -e 's/&/\/g' -e 's/"/\\\/g' \
--e "s/'/\/g" -e 's//\\\/g' <<< "$1"
-}
-
-## @description  sed-escapes the characters (\/&) in the given parameter.
-## @audience private
-## @stabilityevolving
-## @replaceable  yes
-## @paramstring
-## @return   sed-escaped string
-function hadoop_sed_escape
-{
-  sed -e 's/[\/&]/\\&/g' <<< "$1"
-}
-
 ## @description Handle subcommands from main program entries
 ## @audience private
 ## @stability evolving

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04465113/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
deleted file mode 100755
index 9b031f2..000
--- 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-@test "hadoop_escape_sed (positive 1)" {
-  ret="$(hadoop_sed_escape "\pass&\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
-  expected="pass\&0#\$asdf\/g  ><'\"~\`!@#$%^\&*()_+-="
-  echo "actual >${ret}<"
-  echo "expected >${expected}<"
-  [ "${ret}" = "${expected}" ]
-}
-
-@test "hadoop_escape_xml (positive 1)" {
-  ret="$(hadoop_xml_escape "\pass&\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
-  expected="\\password\0#\$asdf/g  
~\`!@#\$%^*()_+-="
-  echo "actual >${ret}<"
-  echo "expected >${expected}<"
-  [ "${ret}" = "${expected}" ]
-}
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[30/50] [abbrv] hadoop git commit: HDFS-12269. Better to return a Map rather than HashMap in getErasureCodingCodecs. Contributed by Huafeng Wang.

2017-08-21 Thread aengineer
HDFS-12269. Better to return a Map rather than HashMap in 
getErasureCodingCodecs. Contributed by Huafeng Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08aaa4b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08aaa4b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08aaa4b3

Branch: refs/heads/HDFS-7240
Commit: 08aaa4b36fab44c3f47878b3c487db3b373ffccf
Parents: ab051bd
Author: Akira Ajisaka 
Authored: Thu Aug 17 13:20:27 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Aug 17 13:20:27 2017 +0900

--
 .../java/org/apache/hadoop/io/erasurecode/CodecRegistry.java| 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java | 2 +-
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java | 3 +--
 .../java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java| 4 ++--
 .../hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java | 5 +++--
 .../ClientNamenodeProtocolServerSideTranslatorPB.java   | 3 +--
 .../hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java   | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 4 ++--
 .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java   | 4 ++--
 .../src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 3 +--
 .../java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java  | 4 ++--
 11 files changed, 18 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08aaa4b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
index fcf1349..daf91e2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
@@ -176,7 +176,7 @@ public final class CodecRegistry {
* @return a map of all codec names, and their corresponding code list
* separated by ','.
*/
-  public HashMap getCodec2CoderCompactMap() {
+  public Map getCodec2CoderCompactMap() {
 return coderNameCompactMap;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08aaa4b3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 88b273a..969522d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2764,7 +2764,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public HashMap getErasureCodingCodecs() throws IOException {
+  public Map getErasureCodingCodecs() throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) {
   return namenode.getErasureCodingCodecs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08aaa4b3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index cd368d4..8f82d03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -26,7 +26,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -2585,7 +2584,7 @@ public class DistributedFileSystem extends FileSystem {
* @return all erasure coding codecs and coders supported by this file 
system.
* @throws IOException
*/
-  public HashMap getAllErasureCodingCodecs()
+  public Map getAllErasureCodingCodecs()
   throws IOException {
 

[35/50] [abbrv] hadoop git commit: YARN-6988. container-executor fails for docker when command length > 4096 B. Contributed by Eric Badger

2017-08-21 Thread aengineer
YARN-6988. container-executor fails for docker when command length > 4096 B. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab1a8ae8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab1a8ae8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab1a8ae8

Branch: refs/heads/HDFS-7240
Commit: ab1a8ae85f8c61304a0f437cdc61cc5aeda36a4b
Parents: dd7916d
Author: Jason Lowe 
Authored: Thu Aug 17 15:50:14 2017 -0500
Committer: Jason Lowe 
Committed: Thu Aug 17 15:50:14 2017 -0500

--
 .../impl/container-executor.c   | 38 +---
 .../main/native/container-executor/impl/util.h  |  7 
 2 files changed, 33 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab1a8ae8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 9f754c4..7361808 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1417,9 +1417,10 @@ int run_docker(const char *command_file) {
   char* docker_command = parse_docker_command_file(command_file);
   char* docker_binary = get_section_value(DOCKER_BINARY_KEY, _cfg);
   docker_binary = check_docker_binary(docker_binary);
+  size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
 
-  char* docker_command_with_binary = calloc(sizeof(char), EXECUTOR_PATH_MAX);
-  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", 
docker_binary, docker_command);
+  char* docker_command_with_binary = calloc(sizeof(char), command_size);
+  snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, 
docker_command);
   char **args = split_delimiter(docker_command_with_binary, " ");
 
   int exit_code = -1;
@@ -1567,16 +1568,24 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
   char *script_file_dest = NULL;
   char *cred_file_dest = NULL;
   char *exit_code_file = NULL;
-  char docker_command_with_binary[EXECUTOR_PATH_MAX];
-  char docker_wait_command[EXECUTOR_PATH_MAX];
-  char docker_logs_command[EXECUTOR_PATH_MAX];
-  char docker_inspect_command[EXECUTOR_PATH_MAX];
-  char docker_rm_command[EXECUTOR_PATH_MAX];
+  char *docker_command_with_binary = NULL;
+  char *docker_wait_command = NULL;
+  char *docker_logs_command = NULL;
+  char *docker_inspect_command = NULL;
+  char *docker_rm_command = NULL;
   int container_file_source =-1;
   int cred_file_source = -1;
   int BUFFER_SIZE = 4096;
   char buffer[BUFFER_SIZE];
 
+  size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
+
+  docker_command_with_binary = calloc(sizeof(char), command_size);
+  docker_wait_command = calloc(sizeof(char), command_size);
+  docker_logs_command = calloc(sizeof(char), command_size);
+  docker_inspect_command = calloc(sizeof(char), command_size);
+  docker_rm_command = calloc(sizeof(char), command_size);
+
   gid_t user_gid = getegid();
   uid_t prev_uid = geteuid();
 
@@ -1621,7 +1630,7 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
 goto cleanup;
   }
 
-  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", 
docker_binary, docker_command);
+  snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, 
docker_command);
 
   fprintf(LOGFILE, "Launching docker container...\n");
   FILE* start_docker = popen(docker_command_with_binary, "r");
@@ -1634,7 +1643,7 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
 goto cleanup;
   }
 
-  snprintf(docker_inspect_command, EXECUTOR_PATH_MAX,
+  snprintf(docker_inspect_command, command_size,
 "%s inspect --format {{.State.Pid}} %s",
 docker_binary, container_id);
 
@@ -1679,7 +1688,7 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
   goto cleanup;
 }
 
-snprintf(docker_wait_command, EXECUTOR_PATH_MAX,
+snprintf(docker_wait_command, command_size,
   "%s wait %s", docker_binary, container_id);
 
 fprintf(LOGFILE, "Waiting for docker container to finish...\n");
@@ -1693,7 +1702,7 @@ int 

[18/50] [abbrv] hadoop git commit: HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.

2017-08-21 Thread aengineer
HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e43c28e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e43c28e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e43c28e

Branch: refs/heads/HDFS-7240
Commit: 2e43c28e01fe006210e71aab179527669f6412ed
Parents: 645a8f2
Author: Yiqun Lin 
Authored: Tue Aug 15 16:48:49 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Aug 15 16:48:49 2017 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 +++--
 .../hdfs/server/protocol/SlowDiskReports.java   |  5 ++--
 .../dev-support/findbugsExcludeFile.xml | 26 +++
 .../hdfs/qjournal/server/JournalNode.java   | 16 +++-
 .../hdfs/server/datanode/DataStorage.java   | 12 ++---
 .../namenode/NNStorageRetentionManager.java | 27 +++-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  6 ++---
 .../offlineImageViewer/ImageLoaderCurrent.java  | 10 +---
 .../namenode/TestNameNodeOptionParsing.java | 27 +++-
 9 files changed, 103 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 677ea35..88b273a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2901,9 +2901,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
 synchronized (DFSClient.class) {
   if (STRIPED_READ_THREAD_POOL == null) {
-STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
+// Only after thread pool is fully constructed then save it to
+// volatile field.
+ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
 numThreads, 60, "StripedRead-", true);
-STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
+threadPool.allowCoreThreadTimeOut(true);
+STRIPED_READ_THREAD_POOL = threadPool;
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index 8095c2a..496389a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -101,8 +101,9 @@ public final class SlowDiskReports {
 }
 
 boolean areEqual;
-for (String disk : this.slowDisks.keySet()) {
-  if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
+for (Map.Entry> entry : this.slowDisks
+.entrySet()) {
+  if (!entry.getValue().equals(that.slowDisks.get(entry.getKey( {
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 2a7824a..9582fcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -264,4 +264,30 @@
 
 
 
+
+   
+   
+   
+ 
+ 
+ 
+   
+   
+   
+ 
+ 
+   
+   
+   
+ 
+ 
+   
+   
+   
+ 
+ 
+  
+  
+  
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
--
diff --git 

[07/50] [abbrv] hadoop git commit: YARN-6996. Change javax.cache library implementation from JSR107 to Apache Geronimo. (Ray Chiang via Subru).

2017-08-21 Thread aengineer
YARN-6996. Change javax.cache library implementation from JSR107 to Apache 
Geronimo. (Ray Chiang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18f3603b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18f3603b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18f3603b

Branch: refs/heads/HDFS-7240
Commit: 18f3603bce37e0e07c9075811b1179afc2c227eb
Parents: e2f6299
Author: Subru Krishnan 
Authored: Mon Aug 14 11:10:00 2017 -0700
Committer: Subru Krishnan 
Committed: Mon Aug 14 11:10:00 2017 -0700

--
 hadoop-project/pom.xml | 6 +++---
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f3603b/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6311cd9..8c1d374 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -96,7 +96,7 @@
 2.0.0-M21
 1.0.0-M33
 
-1.0.0
+1.0-alpha-1
 3.3.1
 2.4.12
 6.2.1.jre7
@@ -1276,8 +1276,8 @@
   1.0.0
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
   ${jcache.version}
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f3603b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 5f85097..441a574 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -103,8 +103,8 @@
   leveldbjni-all
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
 
 
   org.ehcache


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: HDFS-12066. When Namenode is in safemode, may not allowed to remove an user's erasure coding policy. Contributed by lufei.

2017-08-21 Thread aengineer
HDFS-12066. When Namenode is in safemode, it may not be allowed to remove a user's
erasure coding policy. Contributed by lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3ae3e26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3ae3e26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3ae3e26

Branch: refs/heads/HDFS-7240
Commit: e3ae3e26446c2e98b7aebc4ea66256cfdb4a397f
Parents: 1040bae
Author: Wei-Chiu Chuang 
Authored: Tue Aug 15 07:41:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 15 07:41:43 2017 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java  | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index caf73f7..1cfaa54 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7113,6 +7113,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 boolean success = false;
 writeLock();
 try {
+  checkOperation(OperationCategory.WRITE);
+  checkNameNodeSafeMode("Cannot remove erasure coding policy "
+  + ecPolicyName);
   FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName);
   success = true;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index bc95ec7..f25d28f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -482,6 +482,15 @@ public class TestSafeMode {
   // expected
 }
 
+try {
+  dfs.removeErasureCodingPolicy("testECName");
+  fail("RemoveErasureCodingPolicy should have failed.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
+  "Cannot remove erasure coding policy", ioe);
+  // expected
+}
+
 assertFalse("Could not leave SM",
 dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2017-08-21 Thread aengineer
Merge branch 'trunk' into HDFS-7240

 Conflicts:
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
hadoop-project/pom.xml

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0bd0f62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0bd0f62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0bd0f62

Branch: refs/heads/HDFS-7240
Commit: d0bd0f623338dbb558d0dee5e747001d825d92c5
Parents: 7d13259 b6bfb2f
Author: Anu Engineer 
Authored: Mon Aug 21 18:57:15 2017 -0700
Committer: Anu Engineer 
Committed: Mon Aug 21 18:57:15 2017 -0700

--
 LICENSE.txt | 1 +
 dev-support/docker/hadoop_env_checks.sh | 2 +-
 dev-support/findHangingTest.sh  | 2 +-
 dev-support/verify-xml.sh   | 2 +-
 .../resources/assemblies/hadoop-yarn-dist.xml   | 1 +
 .../hadoop-client-check-invariants/pom.xml  | 1 -
 .../hadoop-client-check-test-invariants/pom.xml | 1 -
 .../hadoop-client-minicluster/pom.xml   |33 +
 .../hadoop-client-runtime/pom.xml   | 7 -
 .../dev-support/findbugsExcludeFile.xml | 4 +
 hadoop-common-project/hadoop-common/pom.xml | 3 +-
 .../hadoop-common/src/main/bin/hadoop   |28 +-
 .../src/main/bin/hadoop-functions.sh|   184 +-
 .../org/apache/hadoop/conf/Configuration.java   |51 +-
 .../key/kms/LoadBalancingKMSClientProvider.java | 4 +-
 .../hadoop/fs/CommonConfigurationKeys.java  |21 +
 .../fs/CommonConfigurationKeysPublic.java   | 2 +
 .../src/main/java/org/apache/hadoop/fs/DF.java  | 9 +-
 .../hadoop/fs/FSDataOutputStreamBuilder.java|   203 +-
 .../apache/hadoop/fs/FileEncryptionInfo.java| 6 +-
 .../java/org/apache/hadoop/fs/FileStatus.java   |   189 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |24 +-
 .../java/org/apache/hadoop/fs/FileUtil.java | 4 +-
 .../org/apache/hadoop/fs/FsUrlConnection.java   |10 +
 .../hadoop/fs/FsUrlStreamHandlerFactory.java|26 +-
 .../org/apache/hadoop/fs/LocatedFileStatus.java |64 +-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java | 4 +-
 .../hadoop/fs/permission/FsPermission.java  |14 +-
 .../apache/hadoop/fs/protocolPB/PBHelper.java   |   131 +
 .../hadoop/fs/protocolPB/package-info.java  |18 +
 .../apache/hadoop/fs/sftp/SFTPFileSystem.java   | 6 +
 .../org/apache/hadoop/fs/shell/AclCommands.java | 6 +-
 .../hadoop/fs/shell/CommandWithDestination.java | 4 +-
 .../java/org/apache/hadoop/fs/shell/Ls.java | 4 +-
 .../java/org/apache/hadoop/fs/shell/Stat.java   |19 +-
 .../hadoop/fs/viewfs/ViewFsFileStatus.java  | 8 +-
 .../fs/viewfs/ViewFsLocatedFileStatus.java  | 6 -
 .../org/apache/hadoop/http/HttpServer2.java | 9 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java | 2 +-
 .../java/org/apache/hadoop/io/SequenceFile.java | 2 +-
 .../hadoop/io/erasurecode/CodecRegistry.java| 2 +-
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 6 +-
 .../io/erasurecode/ErasureCodeConstants.java| 8 +
 .../hadoop/io/retry/RetryInvocationHandler.java |11 +
 .../hadoop/security/CompositeGroupsMapping.java | 4 +-
 .../org/apache/hadoop/util/GenericsUtil.java|15 +
 .../hadoop/util/curator/ZKCuratorManager.java   |   340 +
 .../hadoop/util/curator/package-info.java   |27 +
 .../hadoop-common/src/main/proto/FSProtos.proto |69 +
 .../src/main/resources/core-default.xml |95 +-
 .../src/site/markdown/FileSystemShell.md| 4 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |20 +-
 .../src/site/markdown/SecureMode.md | 9 +-
 .../src/site/markdown/ServiceLevelAuth.md   |32 +-
 .../src/site/markdown/UnixShellGuide.md | 4 +-
 .../src/site/markdown/filesystem/filesystem.md  |57 +-
 .../filesystem/fsdataoutputstreambuilder.md |   182 +
 .../src/site/markdown/filesystem/index.md   | 1 +
 .../conf/TestCommonConfigurationFields.java | 6 +
 .../apache/hadoop/conf/TestConfiguration.java   |   229 +-
 .../org/apache/hadoop/fs/TestFileStatus.java| 1 +
 .../apache/hadoop/fs/TestLocalFileSystem.java   |78 +-
 .../fs/contract/AbstractContractAppendTest.java |33 +-
 .../fs/contract/AbstractContractCreateTest.java |90 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |88 +-
 .../fs/protocolPB/TestFSSerialization.java  |85 +
 .../org/apache/hadoop/http/TestHttpServer.java  |

[37/50] [abbrv] hadoop git commit: HDFS-12316. Verify HDFS snapshot deletion doesn't crash the ongoing file writes.

2017-08-21 Thread aengineer
HDFS-12316. Verify HDFS snapshot deletion doesn't crash the ongoing file writes.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4230872d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4230872d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4230872d

Branch: refs/heads/HDFS-7240
Commit: 4230872dd66d748172903b1522885b03f34bbf9b
Parents: b298948
Author: Manoj Govindassamy 
Authored: Thu Aug 17 16:23:48 2017 -0700
Committer: Manoj Govindassamy 
Committed: Thu Aug 17 16:23:48 2017 -0700

--
 .../snapshot/TestOpenFilesWithSnapshot.java | 109 +++
 1 file changed, 109 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4230872d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index fb83a3e..bf27f2c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -23,7 +23,11 @@ import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -38,12 +42,15 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestOpenFilesWithSnapshot {
+  private static final Log LOG =
+  LogFactory.getLog(TestOpenFilesWithSnapshot.class.getName());
   private final Configuration conf = new Configuration();
   MiniDFSCluster cluster = null;
   DistributedFileSystem fs = null;
@@ -622,6 +629,108 @@ public class TestOpenFilesWithSnapshot {
 hbaseOutputStream.close();
   }
 
+  /**
+   * Test client writing to open files are not interrupted when snapshots
+   * that captured open files get deleted.
+   */
+  @Test (timeout = 240000)
+  public void testOpenFileWritingAcrossSnapDeletion() throws Exception {
+final Path snapRootDir = new Path("/level_0_A");
+final String flumeFileName = "flume.log";
+final String hbaseFileName = "hbase.log";
+final String snap1Name = "snap_1";
+final String snap2Name = "snap_2";
+final String snap3Name = "snap_3";
+
+// Create files and open streams
+final Path flumeFile = new Path(snapRootDir, flumeFileName);
+FSDataOutputStream flumeOut = fs.create(flumeFile, false,
+8000, (short)3, 1048576);
+flumeOut.close();
+final Path hbaseFile = new Path(snapRootDir, hbaseFileName);
+FSDataOutputStream hbaseOut = fs.create(hbaseFile, false,
+8000, (short)3, 1048576);
+hbaseOut.close();
+
+final AtomicBoolean writerError = new AtomicBoolean(false);
+final CountDownLatch startLatch = new CountDownLatch(1);
+final CountDownLatch deleteLatch = new CountDownLatch(1);
+Thread t = new Thread(new Runnable() {
+  @Override
+  public void run() {
+try {
+  FSDataOutputStream flumeOutputStream = fs.append(flumeFile, 8000);
+  FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile, 8000);
+  byte[] bytes = new byte[(int) (1024 * 0.2)];
+  Random r = new Random(Time.now());
+
+  for (int i = 0; i < 20; i++) {
+r.nextBytes(bytes);
+flumeOutputStream.write(bytes);
+if (hbaseOutputStream != null) {
+  hbaseOutputStream.write(bytes);
+}
+if (i == 5) {
+  startLatch.countDown();
+} else if (i == 10) {
+  deleteLatch.countDown();
+} else if (i == 15) {
+  hbaseOutputStream.hsync();
+  fs.delete(hbaseFile, true);
+  try {
+hbaseOutputStream.close();
+  } catch (Exception e) {
+  

[34/50] [abbrv] hadoop git commit: HDFS-12250. Reduce usage of FsPermissionExtension in unit tests. Contributed by Chris Douglas.

2017-08-21 Thread aengineer
HDFS-12250. Reduce usage of FsPermissionExtension in unit tests. Contributed by 
Chris Douglas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd7916d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd7916d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd7916d3

Branch: refs/heads/HDFS-7240
Commit: dd7916d3cd5d880d0b257d229f43f10feff04c93
Parents: f9a0e23
Author: Andrew Wang 
Authored: Thu Aug 17 09:35:36 2017 -0700
Committer: Andrew Wang 
Committed: Thu Aug 17 09:35:36 2017 -0700

--
 .../hadoop/fs/permission/FsPermission.java  |  2 +-
 .../org/apache/hadoop/fs/shell/AclCommands.java |  6 ++---
 .../hadoop/fs/shell/CommandWithDestination.java |  4 ++--
 .../java/org/apache/hadoop/fs/shell/Ls.java |  4 ++--
 .../fs/http/client/BaseTestHttpFSWith.java  |  1 +
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 24 ++--
 .../hdfs/server/namenode/FSAclBaseTest.java |  6 +
 .../ClientDistributedCacheManager.java  |  6 ++---
 .../apache/hadoop/fs/adl/TestGetFileStatus.java |  1 +
 .../hadoop/tools/CopyListingFileStatus.java |  4 ++--
 .../apache/hadoop/tools/util/DistCpUtils.java   |  4 +---
 11 files changed, 33 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7916d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
index 23692de..031092b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
@@ -163,7 +163,7 @@ public class FsPermission implements Writable, Serializable,
*/
   public static FsPermission read(DataInput in) throws IOException {
 FsPermission p = new FsPermission();
-p.readFields(in);
+p.fromShort(in.readShort());
 return p;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7916d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
index a5e386c..701c9de 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
@@ -86,9 +86,9 @@ class AclCommands extends FsCommand {
   (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
   }
 
-  AclStatus aclStatus = null;
-  List<AclEntry> entries = null;
-  if (perm.getAclBit()) {
+  final AclStatus aclStatus;
+  final List<AclEntry> entries;
+  if (item.stat.hasAcl()) {
 aclStatus = item.fs.getAclStatus(item.path);
 entries = aclStatus.getEntries();
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7916d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 2a483c0..0bd4882 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -444,8 +444,8 @@ abstract class CommandWithDestination extends FsCommand {
   src.stat.getPermission());
 }
 if (shouldPreserve(FileAttribute.ACL)) {
-  FsPermission perm = src.stat.getPermission();
-  if (perm.getAclBit()) {
+  if (src.stat.hasAcl()) {
+FsPermission perm = src.stat.getPermission();
 List<AclEntry> srcEntries =
 src.fs.getAclStatus(src.path).getEntries();
 List<AclEntry> srcFullEntries =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7916d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 

[47/50] [abbrv] hadoop git commit: HDFS-11988. Verify HDFS Snapshots with open files captured are consistent across truncates and appends to current version file.

2017-08-21 Thread aengineer
HDFS-11988. Verify HDFS Snapshots with open files captured are consistent 
across truncates and appends to current version file.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/913760cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/913760cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/913760cb

Branch: refs/heads/HDFS-7240
Commit: 913760cb4fe7123e55004800f75dc00540a79f69
Parents: 267e19a
Author: Manoj Govindassamy 
Authored: Mon Aug 21 11:08:38 2017 -0700
Committer: Manoj Govindassamy 
Committed: Mon Aug 21 11:08:38 2017 -0700

--
 .../snapshot/TestOpenFilesWithSnapshot.java | 112 +++
 1 file changed, 112 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/913760cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index bf27f2c..537612c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
@@ -731,6 +732,117 @@ public class TestOpenFilesWithSnapshot {
 cluster.waitActive();
   }
 
+  /**
+   * Verify snapshots with open files captured are safe even when the
+   * 'current' version of the file is truncated and appended later.
+   */
+  @Test (timeout = 120000)
+  public void testOpenFilesSnapChecksumWithTrunkAndAppend() throws Exception {
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
+true);
+// Construct the directory tree
+final Path dir = new Path("/A/B/C");
+fs.mkdirs(dir);
+
+// String constants
+final Path hbaseSnapRootDir = dir;
+final String hbaseFileName = "hbase.wal";
+final String hbaseSnap1Name = "hbase_snap_s1";
+final String hbaseSnap2Name = "hbase_snap_s2";
+final String hbaseSnap3Name = "hbase_snap_s3";
+final String hbaseSnap4Name = "hbase_snap_s4";
+
+// Create files and open a stream
+final Path hbaseFile = new Path(dir, hbaseFileName);
+createFile(hbaseFile);
+final FileChecksum hbaseWALFileCksum0 =
+fs.getFileChecksum(hbaseFile);
+FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile);
+
+// Create Snapshot S1
+final Path hbaseS1Dir = SnapshotTestHelper.createSnapshot(
+fs, hbaseSnapRootDir, hbaseSnap1Name);
+final Path hbaseS1Path = new Path(hbaseS1Dir, hbaseFileName);
+final FileChecksum hbaseFileCksumS1 = fs.getFileChecksum(hbaseS1Path);
+
+// Verify if Snap S1 checksum is same as the current version one
+Assert.assertEquals("Live and snap1 file checksum doesn't match!",
+hbaseWALFileCksum0, fs.getFileChecksum(hbaseS1Path));
+
+int newWriteLength = (int) (BLOCKSIZE * 1.5);
+byte[] buf = new byte[newWriteLength];
+Random random = new Random();
+random.nextBytes(buf);
+writeToStream(hbaseOutputStream, buf);
+
+// Create Snapshot S2
+final Path hbaseS2Dir = SnapshotTestHelper.createSnapshot(
+fs, hbaseSnapRootDir, hbaseSnap2Name);
+final Path hbaseS2Path = new Path(hbaseS2Dir, hbaseFileName);
+final FileChecksum hbaseFileCksumS2 = fs.getFileChecksum(hbaseS2Path);
+
+// Verify if the s1 checksum is still the same
+Assert.assertEquals("Snap file checksum has changed!",
+hbaseFileCksumS1, fs.getFileChecksum(hbaseS1Path));
+// Verify if the s2 checksum is different from the s1 checksum
+Assert.assertNotEquals("Snap1 and snap2 file checksum should differ!",
+hbaseFileCksumS1, hbaseFileCksumS2);
+
+newWriteLength = (int) (BLOCKSIZE * 2.5);
+buf = new byte[newWriteLength];
+random.nextBytes(buf);
+writeToStream(hbaseOutputStream, buf);
+
+// Create Snapshot S3
+final Path hbaseS3Dir = SnapshotTestHelper.createSnapshot(
+fs, hbaseSnapRootDir, hbaseSnap3Name);
+final Path hbaseS3Path = new Path(hbaseS3Dir, hbaseFileName);
+FileChecksum 

[03/50] [abbrv] hadoop git commit: HADOOP-14627. Support MSI and DeviceCode token provider in ADLS. Contributed by Atul Sikaria.

2017-08-21 Thread aengineer
HADOOP-14627. Support MSI and DeviceCode token provider in ADLS. Contributed by 
Atul Sikaria.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7769e961
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7769e961
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7769e961

Branch: refs/heads/HDFS-7240
Commit: 7769e9614956283a86eda9e4e69aaa592c0ca960
Parents: 8b242f0
Author: John Zhuge 
Authored: Thu Aug 10 00:43:40 2017 -0700
Committer: John Zhuge 
Committed: Sun Aug 13 00:22:34 2017 -0700

--
 .../src/main/resources/core-default.xml | 37 +++-
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  2 +-
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  8 ++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 21 +
 .../apache/hadoop/fs/adl/TokenProviderType.java |  2 +
 .../src/site/markdown/index.md  | 98 ++--
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 40 
 7 files changed, 198 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ffcab2c..7c4b0f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2586,11 +2586,16 @@
 ClientCredential
 
   Defines Azure Active Directory OAuth2 access token provider type.
-  Supported types are ClientCredential, RefreshToken, and Custom.
+  Supported types are ClientCredential, RefreshToken, MSI, DeviceCode,
+  and Custom.
   The ClientCredential type requires property fs.adl.oauth2.client.id,
   fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
   The RefreshToken type requires property fs.adl.oauth2.client.id and
   fs.adl.oauth2.refresh.token.
+  The MSI type requires properties fs.adl.oauth2.msi.port and
+  fs.adl.oauth2.msi.tenantguid.
+  The DeviceCode type requires property
+  fs.adl.oauth2.devicecode.clientapp.id.
   The Custom type requires property fs.adl.oauth2.access.token.provider.
 
   
@@ -2627,6 +2632,36 @@
 
   
 
+  
+fs.adl.oauth2.msi.port
+
+
+  The localhost port for the MSI token service. This is the port specified
+  when creating the Azure VM.
+  Used by MSI token provider.
+
+  
+
+  
+fs.adl.oauth2.msi.tenantguid
+
+
+  The tenant guid for the Azure AAD tenant under which the azure data lake
+  store account is created.
+  Used by MSI token provider.
+
+  
+
+  
+fs.adl.oauth2.devicecode.clientapp.id
+
+
+  The app id of the AAD native app in whose context the auth request
+  should be made.
+  Used by DeviceCode token provider.
+
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 3aed5e1..47f12df 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -110,7 +110,7 @@
 
   com.microsoft.azure
   azure-data-lake-store-sdk
-  2.1.4
+  2.2.1
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 31df222..f77d981 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -54,6 +54,14 @@ public final class AdlConfKeys {
   public static final String TOKEN_PROVIDER_TYPE_CLIENT_CRED =
   "ClientCredential";
 
+  // MSI Auth Configuration
+  public static final String MSI_PORT = "fs.adl.oauth2.msi.port";
+  public static final String MSI_TENANT_GUID = "fs.adl.oauth2.msi.tenantguid";
+
+  // DeviceCode Auth configuration
+  public static final String DEVICE_CODE_CLIENT_APP_ID =
+  "fs.adl.oauth2.devicecode.clientapp.id";
+
   public static final String 

[26/50] [abbrv] hadoop git commit: YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by Robert Kanter

2017-08-21 Thread aengineer
YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by 
Robert Kanter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14553061
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14553061
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14553061

Branch: refs/heads/HDFS-7240
Commit: 14553061be0a341df3e628dcaf06717b4630b05e
Parents: 588c190
Author: Jason Lowe 
Authored: Wed Aug 16 13:04:36 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 13:04:36 2017 -0500

--
 .../apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14553061/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index 14df94a..6a063e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -151,13 +151,13 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
YarnClient rmClient = YarnClient.createYarnClient()) {
   Configuration conf = new YarnConfiguration();
   conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
+  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500);
   // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
   // RM_AM_EXPIRY_INTERVAL_MS * 1.5 *3
   conf.setInt(
-  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
+  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 
20);
   cluster.init(conf);
   cluster.start();
   final Configuration yarnConf = cluster.getConfig();
@@ -198,7 +198,7 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
 lastToken = response.getAMRMToken();
 
 // Time slot to be sure the AMRMProxy renew the token
-Thread.sleep(1500);
+Thread.sleep(4500);
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/50] [abbrv] hadoop git commit: YARN-6852. Native code changes to support isolate GPU devices by using CGroups. (wangda)

2017-08-21 Thread aengineer
YARN-6852. Native code changes to support isolate GPU devices by using CGroups. 
(wangda)

Change-Id: I4869cc4d8ad539539ccba4bea5a178cacdb741ab


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/436c2638
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/436c2638
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/436c2638

Branch: refs/heads/HDFS-7240
Commit: 436c2638f9ca1fb8de6a630cb5e91d956ac75216
Parents: 8991f0b
Author: Wangda Tan 
Authored: Fri Aug 18 18:26:36 2017 -0700
Committer: Wangda Tan 
Committed: Fri Aug 18 18:26:36 2017 -0700

--
 .../src/CMakeLists.txt  |  12 +-
 .../impl/container-executor.c   |  10 +-
 .../impl/container-executor.h   |   2 +
 .../main/native/container-executor/impl/main.c  |  13 +-
 .../impl/modules/cgroups/cgroups-operations.c   | 161 +
 .../impl/modules/cgroups/cgroups-operations.h   |  55 +
 .../impl/modules/common/constants.h |  29 +++
 .../impl/modules/common/module-configs.c|  41 
 .../impl/modules/common/module-configs.h|  33 +++
 .../impl/modules/gpu/gpu-module.c   | 229 +++
 .../impl/modules/gpu/gpu-module.h   |  45 
 .../container-executor/impl/utils/path-utils.c  |  52 +
 .../container-executor/impl/utils/path-utils.h  |  35 +++
 .../impl/utils/string-utils.c   | 106 +++--
 .../impl/utils/string-utils.h   |   7 +-
 .../test/modules/cgroups/test-cgroups-module.cc | 121 ++
 .../test/modules/gpu/test-gpu-module.cc | 203 
 .../test/test-container-executor-common.h   |  36 +++
 .../test/test-container-executor.c  |  23 +-
 .../native/container-executor/test/test_main.cc |  11 +-
 .../test/utils/test-path-utils.cc   |  67 ++
 .../test/utils/test-string-utils.cc |  93 
 22 files changed, 1338 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/436c2638/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 100d7ca..07c29bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -101,6 +101,10 @@ add_library(container
 main/native/container-executor/impl/container-executor.c
 main/native/container-executor/impl/get_executable.c
 main/native/container-executor/impl/utils/string-utils.c
+main/native/container-executor/impl/utils/path-utils.c
+main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
+main/native/container-executor/impl/modules/common/module-configs.c
+main/native/container-executor/impl/modules/gpu/gpu-module.c
 )
 
 add_executable(container-executor
@@ -113,12 +117,14 @@ target_link_libraries(container-executor
 
 output_directory(container-executor target/usr/local/bin)
 
+# Test cases
 add_executable(test-container-executor
 main/native/container-executor/test/test-container-executor.c
 )
 target_link_libraries(test-container-executor
 container ${EXTRA_LIBS}
 )
+
 output_directory(test-container-executor target/usr/local/bin)
 
 # unit tests for container executor
@@ -126,6 +132,10 @@ add_executable(cetest
 main/native/container-executor/impl/util.c
 main/native/container-executor/test/test_configuration.cc
 main/native/container-executor/test/test_main.cc
+main/native/container-executor/test/utils/test-string-utils.cc
+main/native/container-executor/test/utils/test-path-utils.cc
+
main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc
+main/native/container-executor/test/modules/gpu/test-gpu-module.cc
 main/native/container-executor/test/test_util.cc)
-target_link_libraries(cetest gtest)
+target_link_libraries(cetest gtest container)
 output_directory(cetest test)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/436c2638/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 

[38/50] [abbrv] hadoop git commit: HADOOP-14398. Modify documents for the FileSystem Builder API. (Lei (Eddy) Xu)

2017-08-21 Thread aengineer
HADOOP-14398. Modify documents for the FileSystem Builder API. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e558b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e558b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e558b1

Branch: refs/heads/HDFS-7240
Commit: 99e558b13ba4d5832aea97374e1d07b4e78e5e39
Parents: 4230872
Author: Lei Xu 
Authored: Thu Aug 17 18:06:23 2017 -0700
Committer: Lei Xu 
Committed: Thu Aug 17 18:06:23 2017 -0700

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java|  74 ++--
 .../src/site/markdown/filesystem/filesystem.md  |  33 +++-
 .../filesystem/fsdataoutputstreambuilder.md | 182 +++
 .../src/site/markdown/filesystem/index.md   |   1 +
 4 files changed, 272 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e558b1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 1f668eb..86c284a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -54,16 +54,29 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  * options accordingly, for example:
  *
  * 
- * FSDataOutputStreamBuilder builder = fs.createFile(path);
- * builder.permission(perm)
+ *
+ * // Don't
+ * if (fs instanceof FooFileSystem) {
+ *   FooFileSystem fs = (FooFileSystem) fs;
+ *   OutputStream out = dfs.createFile(path)
+ * .optionA()
+ * .optionB("value")
+ * .cache()
+ *   .build()
+ * } else if (fs instanceof BarFileSystem) {
+ *   ...
+ * }
+ *
+ * // Do
+ * OutputStream out = fs.createFile(path)
+ *   .permission(perm)
  *   .bufferSize(bufSize)
- *   .opt("dfs.outputstream.builder.lazy-persist", true)
- *   .opt("dfs.outputstream.builder.ec.policy-name", "rs-3-2-64k")
- *   .opt("fs.local.o-direct", true)
- *   .must("fs.s3a.fast-upload", true)
- *   .must("fs.azure.buffer-size", 256 * 1024 * 1024);
- * FSDataOutputStream out = builder.build();
- * ...
+ *   .opt("foofs:option.a", true)
+ *   .opt("foofs:option.b", "value")
+ *   .opt("barfs:cache", true)
+ *   .must("foofs:cache", true)
+ *   .must("barfs:cache-size", 256 * 1024 * 1024)
+ *   .build();
  * 
  *
  * If the option is not related to the file system, the option will be ignored.
@@ -263,6 +276,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional boolean parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, boolean value) {
 mandatoryKeys.remove(key);
@@ -272,6 +287,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional int parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, int value) {
 mandatoryKeys.remove(key);
@@ -281,6 +298,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional float parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, float value) {
 mandatoryKeys.remove(key);
@@ -290,6 +309,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional double parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, double value) {
 mandatoryKeys.remove(key);
@@ -299,6 +320,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set an array of string values as optional parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, @Nonnull final String... values) {
 mandatoryKeys.remove(key);
@@ -310,8 +333,7 @@ public abstract class FSDataOutputStreamBuilder
* Set mandatory option to the Builder.
*
* If the option is not supported or unavailable on the {@link FileSystem},
-   * the client should expect {@link #build()} throws
-   * {@link IllegalArgumentException}.
+   * the client should expect {@link #build()} throws IllegalArgumentException.
*/
   public B must(@Nonnull final String key, @Nonnull final String value) {
 mandatoryKeys.add(key);
@@ -319,35 +341,55 @@ public abstract class FSDataOutputStreamBuilder
 return getThisBuilder();
   }
 
-  /** Set mandatory boolean option. */
+  /**
+   * 

[45/50] [abbrv] hadoop git commit: YARN-6979. [Addendum patch] Fixed classname and added javadocs. (Kartheek Muthyala via asuresh)

2017-08-21 Thread aengineer
YARN-6979. [Addendum patch] Fixed classname and added javadocs. (Kartheek 
Muthyala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a82d7bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a82d7bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a82d7bc

Branch: refs/heads/HDFS-7240
Commit: 7a82d7bcea8124e1b65c275fac15bf2047d17471
Parents: 8410d86
Author: Arun Suresh 
Authored: Sun Aug 20 08:55:13 2017 -0700
Committer: Arun Suresh 
Committed: Sun Aug 20 10:24:05 2017 -0700

--
 .../CMgrDecreaseContainersResourceEvent.java| 37 ---
 .../nodemanager/CMgrUpdateContainersEvent.java  | 48 
 .../nodemanager/ContainerManagerEventType.java  |  2 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |  8 ++--
 .../containermanager/ContainerManagerImpl.java  | 10 ++--
 5 files changed, 57 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a82d7bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
deleted file mode 100644
index 9479d0b..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.nodemanager;
-
-import org.apache.hadoop.yarn.api.records.Container;
-import java.util.List;
-
-public class CMgrDecreaseContainersResourceEvent extends ContainerManagerEvent 
{
-
-  private final List<Container> containersToDecrease;
-
-  public CMgrDecreaseContainersResourceEvent(List<Container>
-  containersToDecrease) {
-super(ContainerManagerEventType.DECREASE_CONTAINERS_RESOURCE);
-this.containersToDecrease = containersToDecrease;
-  }
-
-  public List<Container> getContainersToDecrease() {
-return this.containersToDecrease;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a82d7bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
new file mode 100644
index 000..5e41701
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is 

[40/50] [abbrv] hadoop git commit: YARN-7007. NPE in RM while using YarnClient.getApplications(). Contributed by Lingfeng Su.

2017-08-21 Thread aengineer
YARN-7007. NPE in RM while using YarnClient.getApplications(). Contributed by 
Lingfeng Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e05fa345
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e05fa345
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e05fa345

Branch: refs/heads/HDFS-7240
Commit: e05fa3451db343c0d22496b332910874b6be5b7f
Parents: c6b4e65
Author: bibinchundatt 
Authored: Fri Aug 18 20:28:50 2017 +0530
Committer: bibinchundatt 
Committed: Fri Aug 18 20:28:50 2017 +0530

--
 .../rmapp/attempt/RMAppAttemptMetrics.java   | 19 +++
 1 file changed, 11 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e05fa345/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index e089050..0655609 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -31,6 +31,7 @@ import 
org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -125,14 +126,16 @@ public class RMAppAttemptMetrics {
 long vcoreSeconds = finishedVcoreSeconds.get();
 
 // Only add in the running containers if this is the active attempt.
-RMAppAttempt currentAttempt = rmContext.getRMApps()
-   .get(attemptId.getApplicationId()).getCurrentAppAttempt();
-if (currentAttempt.getAppAttemptId().equals(attemptId)) {
-  ApplicationResourceUsageReport appResUsageReport = rmContext
-.getScheduler().getAppResourceUsageReport(attemptId);
-  if (appResUsageReport != null) {
-memorySeconds += appResUsageReport.getMemorySeconds();
-vcoreSeconds += appResUsageReport.getVcoreSeconds();
+RMApp rmApp = rmContext.getRMApps().get(attemptId.getApplicationId());
+if (null != rmApp) {
+  RMAppAttempt currentAttempt = rmApp.getCurrentAppAttempt();
+  if (currentAttempt.getAppAttemptId().equals(attemptId)) {
+ApplicationResourceUsageReport appResUsageReport = rmContext
+.getScheduler().getAppResourceUsageReport(attemptId);
+if (appResUsageReport != null) {
+  memorySeconds += appResUsageReport.getMemorySeconds();
+  vcoreSeconds += appResUsageReport.getVcoreSeconds();
+}
   }
 }
 return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType update. (Kartheek Muthyala via asuresh)

2017-08-21 Thread aengineer
YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType 
update. (Kartheek Muthyala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d7be1d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d7be1d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d7be1d8

Branch: refs/heads/HDFS-7240
Commit: 4d7be1d8575e9254c59d41460960708e3718503a
Parents: 0446511
Author: Arun Suresh 
Authored: Mon Aug 14 19:46:17 2017 -0700
Committer: Arun Suresh 
Committed: Mon Aug 14 19:46:17 2017 -0700

--
 .../yarn/client/api/impl/TestAMRMClient.java| 395 +--
 .../yarn/client/api/impl/TestNMClient.java  |   7 +-
 .../containermanager/ContainerManagerImpl.java  | 132 ---
 .../containermanager/container/Container.java   |   4 +-
 .../container/ContainerImpl.java|  37 +-
 .../monitor/ContainersMonitorImpl.java  |  15 -
 .../scheduler/ContainerScheduler.java   |  73 
 .../scheduler/ContainerSchedulerEventType.java  |   1 +
 .../UpdateContainerSchedulerEvent.java  |  85 
 .../nodemanager/TestNodeManagerResync.java  |  11 +-
 .../BaseContainerManagerTest.java   |  33 +-
 .../containermanager/TestContainerManager.java  | 267 -
 .../TestContainerManagerRecovery.java   |   2 +-
 .../TestContainerSchedulerQueuing.java  |  96 +
 .../nodemanager/webapp/MockContainer.java   |   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |   2 +-
 .../security/RMContainerTokenSecretManager.java |  30 +-
 17 files changed, 964 insertions(+), 228 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7be1d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 1b2bca3..09b12f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
@@ -36,6 +37,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -142,6 +144,10 @@ public class TestAMRMClient {
 // set the minimum allocation so that resource decrease can go under 1024
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
 conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+conf.setBoolean(
+YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
+conf.setInt(
+YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10);
 yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), 
nodeCount, 1, 1);
 yarnCluster.init(conf);
 yarnCluster.start();
@@ -924,8 +930,8 @@ public class TestAMRMClient {
 // add exp=x to ANY
 client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024,
 1), null, null, Priority.UNDEFINED, true, "x"));
-Assert.assertEquals(1, client.ask.size());
-Assert.assertEquals("x", client.ask.iterator().next()
+assertEquals(1, client.ask.size());
+assertEquals("x", client.ask.iterator().next()
 .getNodeLabelExpression());
 
 // add exp=x then add exp=a to ANY in same priority, only exp=a should kept
@@ -933,8 +939,8 @@ public class TestAMRMClient {
 1), null, null, Priority.UNDEFINED, true, "x"));
 client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024,
 1), null, null, Priority.UNDEFINED, true, "a"));
-Assert.assertEquals(1, client.ask.size());
-Assert.assertEquals("a", client.ask.iterator().next()
+assertEquals(1, client.ask.size());
+assertEquals("a", client.ask.iterator().next()
 .getNodeLabelExpression());
 
 // add exp=x to ANY, rack and node, only resource request has ANY resource
@@ -943,10 +949,10 @@ public class TestAMRMClient {
 client.addContainerRequest(new 

[17/50] [abbrv] hadoop git commit: HADOOP-14726. Mark FileStatus::isDir as final

2017-08-21 Thread aengineer
HADOOP-14726. Mark FileStatus::isDir as final


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/645a8f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/645a8f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/645a8f2a

Branch: refs/heads/HDFS-7240
Commit: 645a8f2a4d09acb5a21820f52ee78784d9e4cc8a
Parents: 4d7be1d
Author: Chris Douglas 
Authored: Mon Aug 14 21:57:20 2017 -0700
Committer: Chris Douglas 
Committed: Mon Aug 14 21:57:20 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileStatus.java| 19 +--
 .../hadoop/fs/viewfs/ViewFsFileStatus.java   |  8 +---
 .../fs/viewfs/ViewFsLocatedFileStatus.java   |  6 --
 .../hadoop/hdfs/protocolPB/PBHelperClient.java   |  2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java   |  6 --
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java |  8 
 .../apache/hadoop/hdfs/server/mover/Mover.java   |  2 +-
 .../hdfs/server/namenode/NamenodeFsck.java   |  4 ++--
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java |  3 +--
 .../hdfs/server/mover/TestStorageMover.java  |  2 +-
 .../hadoop/hdfs/server/namenode/TestStartup.java |  4 ++--
 .../server/namenode/ha/TestEditLogTailer.java|  4 ++--
 .../namenode/ha/TestFailureToReadEdits.java  |  6 +++---
 .../namenode/ha/TestInitializeSharedEdits.java   |  2 +-
 .../lib/input/TestCombineFileInputFormat.java|  2 +-
 .../azure/TestOutOfBandAzureBlobOperations.java  |  8 
 .../hadoop/fs/swift/snative/SwiftFileStatus.java | 16 
 .../snative/SwiftNativeFileSystemStore.java  |  4 ++--
 .../fs/swift/TestSwiftFileSystemDirectories.java |  4 ++--
 .../TestSwiftFileSystemPartitionedUploads.java   |  2 +-
 20 files changed, 46 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/645a8f2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 2f22ea0..8575439 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -172,7 +172,7 @@ public class FileStatus implements Writable, 
Comparable,
* @return true if this is a file
*/
   public boolean isFile() {
-return !isdir && !isSymlink();
+return !isDirectory() && !isSymlink();
   }
 
   /**
@@ -182,20 +182,20 @@ public class FileStatus implements Writable, 
Comparable,
   public boolean isDirectory() {
 return isdir;
   }
-  
+
   /**
-   * Old interface, instead use the explicit {@link FileStatus#isFile()}, 
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * Old interface, instead use the explicit {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* @return true if this is a directory.
-   * @deprecated Use {@link FileStatus#isFile()},  
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * @deprecated Use {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* instead.
*/
   @Deprecated
-  public boolean isDir() {
-return isdir;
+  public final boolean isDir() {
+return isDirectory();
   }
-  
+
   /**
* Is this a symbolic link?
* @return true if this is a symbolic link
@@ -448,7 +448,6 @@ public class FileStatus implements Writable, 
Comparable,
 FileStatus other = PBHelper.convert(proto);
 isdir = other.isDirectory();
 length = other.getLen();
-isdir = other.isDirectory();
 block_replication = other.getReplication();
 blocksize = other.getBlockSize();
 modification_time = other.getModificationTime();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/645a8f2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
index e0f62e4..ce03ced 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
@@ -61,13 +61,7 @@ class ViewFsFileStatus extends FileStatus {
 

[02/50] [abbrv] hadoop git commit: HDFS-11303. Hedged read might hang infinitely if read data from all DN failed . Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.

2017-08-21 Thread aengineer
HDFS-11303. Hedged read might hang infinitely if read data from all DN failed . 
Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b242f09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b242f09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b242f09

Branch: refs/heads/HDFS-7240
Commit: 8b242f09a61a7536d2422546bfa6c2aaf1d57ed6
Parents: 28d97b7
Author: John Zhuge 
Authored: Thu Aug 10 14:04:36 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 19:42:07 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 11 ++--
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 63 
 2 files changed, 70 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index dcc997c..6bff172 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1131,8 +1131,9 @@ public class DFSInputStream extends FSInputStream
Future<ByteBuffer> firstRequest = hedgedService
 .submit(getFromDataNodeCallable);
 futures.add(firstRequest);
+Future<ByteBuffer> future = null;
 try {
-  Future<ByteBuffer> future = hedgedService.poll(
+  future = hedgedService.poll(
   conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
   if (future != null) {
 ByteBuffer result = future.get();
@@ -1142,16 +1143,18 @@ public class DFSInputStream extends FSInputStream
   }
   DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
   + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
-  // Ignore this node on next go around.
-  ignored.add(chosenNode.info);
   dfsClient.getHedgedReadMetrics().incHedgedReadOps();
   // continue; no need to refresh block locations
 } catch (ExecutionException e) {
-  // Ignore
+  futures.remove(future);
 } catch (InterruptedException e) {
   throw new InterruptedIOException(
   "Interrupted while waiting for reading task");
 }
+// Ignore this node on next go around.
+// If poll timeout and the request still ongoing, don't consider it
+// again. If read data failed, don't consider it either.
+ignored.add(chosenNode.info);
   } else {
 // We are starting up a 'hedged' read. We have a read already
 // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 85fc97b..bcb02b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -59,6 +59,8 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Supplier;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
 
 /**
  * This class tests the DFS positional read functionality in a single node
@@ -72,6 +74,9 @@ public class TestPread {
   boolean simulatedStorage;
   boolean isHedgedRead;
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestPread.class.getName());
+
   @Before
   public void setup() {
 simulatedStorage = false;
@@ -551,6 +556,64 @@ public class TestPread {
 }
   }
 
+  @Test(timeout=30000)
+  public void testHedgedReadFromAllDNFailed() throws IOException {
+Configuration conf = new Configuration();
+int numHedgedReadPoolThreads = 5;
+final int hedgedReadTimeoutMillis = 50;
+
+conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
+numHedgedReadPoolThreads);
+conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
+hedgedReadTimeoutMillis);
+conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 

[05/50] [abbrv] hadoop git commit: HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)

2017-08-21 Thread aengineer
HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce797a17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce797a17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce797a17

Branch: refs/heads/HDFS-7240
Commit: ce797a170669524224cfeaaf70647047e7626816
Parents: d8f74c3
Author: Lei Xu 
Authored: Mon Aug 14 10:27:47 2017 -0700
Committer: Lei Xu 
Committed: Mon Aug 14 10:27:47 2017 -0700

--
 .../hadoop-client-minicluster/pom.xml   |   6 --
 .../hadoop-client-runtime/pom.xml   |   7 ---
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   5 --
 .../offlineEditsViewer/XmlEditsVisitor.java |  41 
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 5850 -> 5850 bytes
 .../src/test/resources/editsStored.xml  |  62 +--
 .../hadoop-mapreduce-client/pom.xml |  10 +--
 hadoop-project-dist/pom.xml |  10 +--
 hadoop-project/pom.xml  |   8 ---
 hadoop-yarn-project/hadoop-yarn/pom.xml |  10 +--
 10 files changed, 62 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 5255640..5cf1fad 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -629,12 +629,6 @@
   
 
 
-  xerces:xercesImpl
-  
-**/*
-  
-
-
   
org.apache.hadoop:hadoop-mapreduce-client-jobclient:*
   
 testjar/*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 2f64152..24c6b7a 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -174,13 +174,6 @@
 
org/apache/jasper/compiler/Localizer.class
   
 
-
-
-  xerces:xercesImpl
-  
-META-INF/services/*
-  
-
 
 
   com.sun.jersey:*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1c50d31..fa1044d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -174,11 +174,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   compile
 
 
-  xerces
-  xercesImpl
-  compile
-
-
   org.apache.htrace
   htrace-core4
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
index 7a39ba6..ddf7933 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
@@ -20,17 +20,21 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.sax.SAXTransformerFactory;
+import javax.xml.transform.sax.TransformerHandler;
+import javax.xml.transform.stream.StreamResult;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
+import 

[48/50] [abbrv] hadoop git commit: HDFS-12325. SFTPFileSystem operations should restore cwd. Contributed by Chen Liang.

2017-08-21 Thread aengineer
HDFS-12325. SFTPFileSystem operations should restore cwd. Contributed by Chen 
Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736ceab2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736ceab2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736ceab2

Branch: refs/heads/HDFS-7240
Commit: 736ceab2f58fb9ab5907c5b5110bd44384038e6b
Parents: 913760c
Author: Arpit Agarwal 
Authored: Sun Aug 20 23:41:06 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 21 11:48:51 2017 -0700

--
 .../main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java| 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/736ceab2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index 421769d..43eb783 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -326,8 +326,10 @@ public class SFTPFileSystem extends FileSystem {
 String parentDir = parent.toUri().getPath();
 boolean succeeded = true;
 try {
+  final String previousCwd = client.pwd();
   client.cd(parentDir);
   client.mkdir(pathName);
+  client.cd(previousCwd);
 } catch (SftpException e) {
   throw new IOException(String.format(E_MAKE_DIR_FORPATH, pathName,
   parentDir));
@@ -474,8 +476,10 @@ public class SFTPFileSystem extends FileSystem {
 }
 boolean renamed = true;
 try {
+  final String previousCwd = channel.pwd();
   channel.cd("/");
   channel.rename(src.toUri().getPath(), dst.toUri().getPath());
+  channel.cd(previousCwd);
 } catch (SftpException e) {
   renamed = false;
 }
@@ -558,8 +562,10 @@ public class SFTPFileSystem extends FileSystem {
 }
 OutputStream os;
 try {
+  final String previousCwd = client.pwd();
   client.cd(parent.toUri().getPath());
   os = client.put(f.getName());
+  client.cd(previousCwd);
 } catch (SftpException e) {
   throw new IOException(e);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[10/50] [abbrv] hadoop git commit: YARN-6881. LOG is unused in AllocationConfiguration (Contributed by weiyuan via Daniel Templeton)

2017-08-21 Thread aengineer
YARN-6881. LOG is unused in AllocationConfiguration (Contributed by weiyuan via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b09c327
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b09c327
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b09c327

Branch: refs/heads/HDFS-7240
Commit: 6b09c327057947049ef7984afbb5ed225f15fc2d
Parents: 608a06c
Author: Daniel Templeton 
Authored: Mon Aug 14 11:55:33 2017 -0700
Committer: Daniel Templeton 
Committed: Mon Aug 14 11:55:33 2017 -0700

--
 .../resourcemanager/scheduler/fair/AllocationConfiguration.java   | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b09c327/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index f143aa6..71e6f7f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -23,8 +23,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -41,7 +39,6 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 
 public class AllocationConfiguration extends ReservationSchedulerConfiguration 
{
-  private static final Log LOG = LogFactory.getLog(FSQueue.class.getName());
   private static final AccessControlList EVERYBODY_ACL = new 
AccessControlList("*");
   private static final AccessControlList NOBODY_ACL = new AccessControlList(" 
");
   private static final ResourceCalculator RESOURCE_CALCULATOR =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/50] [abbrv] hadoop git commit: HDFS-11082. Provide replicated EC policy to replicate files. Contributed by SammiChen.

2017-08-21 Thread aengineer
HDFS-11082. Provide replicated EC policy to replicate files. Contributed by 
SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96b3a6b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96b3a6b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96b3a6b9

Branch: refs/heads/HDFS-7240
Commit: 96b3a6b9721e922d33fadc2459b561a85dbf9b8e
Parents: 08aaa4b
Author: Andrew Wang 
Authored: Wed Aug 16 22:17:06 2017 -0700
Committer: Andrew Wang 
Committed: Wed Aug 16 22:17:06 2017 -0700

--
 .../io/erasurecode/ErasureCodeConstants.java|  8 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  3 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  6 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|  6 +-
 .../hdfs/protocol/ErasureCodingPolicy.java  |  5 ++
 .../protocol/SystemErasureCodingPolicies.java   | 14 
 .../namenode/ErasureCodingPolicyManager.java| 13 ++-
 .../server/namenode/FSDirErasureCodingOp.java   | 13 ++-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  2 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 24 +-
 .../src/site/markdown/HDFSErasureCoding.md  | 16 ++--
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 81 ++
 .../hdfs/server/namenode/TestFSImage.java   | 87 
 .../test/resources/testErasureCodingConf.xml| 78 +-
 14 files changed, 331 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b3a6b9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index e0d7946..d3c3b6b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -30,6 +30,7 @@ public final class ErasureCodeConstants {
   public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
   public static final String XOR_CODEC_NAME = "xor";
   public static final String HHXOR_CODEC_NAME = "hhxor";
+  public static final String REPLICATION_CODEC_NAME = "replication";
 
   public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
   RS_CODEC_NAME, 6, 3);
@@ -45,4 +46,11 @@ public final class ErasureCodeConstants {
 
   public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
   RS_CODEC_NAME, 10, 4);
+
+  public static final ECSchema REPLICATION_1_2_SCHEMA = new ECSchema(
+  REPLICATION_CODEC_NAME, 1, 2);
+
+  public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
+  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b3a6b9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 969522d..47c14e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3044,7 +3044,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
*
* @param src path to get the information for
* @return Returns the policy information if file or directory on the path is
-   * erasure coded, null otherwise
+   * erasure coded, null otherwise. Null will be returned if directory or file
+   * has REPLICATION policy.
* @throws IOException
*/
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b3a6b9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 8f82d03..ceec2b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 

[44/50] [abbrv] hadoop git commit: YARN-6979. Add flag to notify all types of container updates to NM via NodeHeartbeatResponse. (Kartheek Muthyala via asuresh)

2017-08-21 Thread aengineer
YARN-6979. Add flag to notify all types of container updates to NM via 
NodeHeartbeatResponse. (Kartheek Muthyala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8410d862
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8410d862
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8410d862

Branch: refs/heads/HDFS-7240
Commit: 8410d862d3a72740f461ef91dddb5325955e1ca5
Parents: 436c263
Author: Arun Suresh 
Authored: Sun Aug 20 07:54:09 2017 -0700
Committer: Arun Suresh 
Committed: Sun Aug 20 07:54:09 2017 -0700

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   2 +-
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../src/main/resources/yarn-default.xml |   8 +
 .../protocolrecords/NodeHeartbeatResponse.java  |   6 +-
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  42 ++---
 .../yarn_server_common_service_protos.proto |   3 +
 .../hadoop/yarn/TestYarnServerApiClasses.java   |   6 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |   2 +-
 .../containermanager/ContainerManagerImpl.java  |  35 ++--
 .../scheduler/ContainerScheduler.java   |   1 +
 .../resourcemanager/ResourceTrackerService.java |   2 +-
 .../rmcontainer/RMContainerImpl.java|   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   6 +-
 .../rmnode/RMNodeDecreaseContainerEvent.java|  39 -
 .../resourcemanager/rmnode/RMNodeEventType.java |   2 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  |  29 ++--
 .../rmnode/RMNodeUpdateContainerEvent.java  |  44 +
 .../scheduler/AbstractYarnScheduler.java|  11 ++
 .../scheduler/SchedulerApplicationAttempt.java  |  39 +++--
 .../yarn/server/resourcemanager/MockNodes.java  |   2 +-
 ...pportunisticContainerAllocatorAMService.java | 168 +++
 .../capacity/TestContainerResizing.java |   7 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   4 +-
 24 files changed, 346 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8410d862/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 8962aba..e71ddff 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -179,7 +179,7 @@ public class NodeInfo {
 }
 
 @Override
-public void updateNodeHeartbeatResponseForContainersDecreasing(
+public void updateNodeHeartbeatResponseForUpdatedContainers(
 NodeHeartbeatResponse response) {
   // TODO Auto-generated method stub
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8410d862/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index d7b159c..6b7ac3c 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -168,7 +168,7 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
-  public void updateNodeHeartbeatResponseForContainersDecreasing(
+  public void updateNodeHeartbeatResponseForUpdatedContainers(
   NodeHeartbeatResponse response) {
 // TODO Auto-generated method stub
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8410d862/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8515e0a..86f45b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -167,6 +167,10 @@ public class YarnConfiguration 

[23/50] [abbrv] hadoop git commit: HDFS-12301. NN File Browser UI: Navigate to a path when enter is pressed

2017-08-21 Thread aengineer
HDFS-12301. NN File Browser UI: Navigate to a path when enter is pressed


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f34646d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f34646d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f34646d6

Branch: refs/heads/HDFS-7240
Commit: f34646d652310442cb5339aa269f10dfa838
Parents: d265459
Author: Ravi Prakash 
Authored: Tue Aug 15 15:44:59 2017 -0700
Committer: Ravi Prakash 
Committed: Tue Aug 15 15:44:59 2017 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f34646d6/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 3e276a9..dae3519 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -370,6 +370,12 @@
 
 var b = function() { browse_directory($('#directory').val()); };
 $('#btn-nav-directory').click(b);
+//Also navigate to the directory when a user presses enter.
+$('#directory').on('keyup', function (e) {
+  if (e.which == 13) {
+browse_directory($('#directory').val());
+  }
+});
 var dir = window.location.hash.slice(1);
 if(dir == "") {
   window.location.hash = "/";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] [abbrv] hadoop git commit: HADOOP-14560. Make HttpServer2 backlog size configurable. Contributed by Alexander Krasheninnikov.

2017-08-21 Thread aengineer
HADOOP-14560. Make HttpServer2 backlog size configurable. Contributed by 
Alexander Krasheninnikov.

This closes #242.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f04cb45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f04cb45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f04cb45

Branch: refs/heads/HDFS-7240
Commit: 1f04cb45f70648678840cdafbec68d534b03fe95
Parents: 96b3a6b
Author: Alexandr Krasheninnikov 
Authored: Wed Jun 21 12:57:34 2017 +0300
Committer: John Zhuge 
Committed: Thu Aug 17 01:05:19 2017 -0700

--
 .../main/java/org/apache/hadoop/http/HttpServer2.java  |  9 -
 .../java/org/apache/hadoop/http/TestHttpServer.java| 13 +
 2 files changed, 21 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f04cb45/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 28b9bb0..a450f66 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -128,6 +128,10 @@ public final class HttpServer2 implements FilterContainer {
   public static final String HTTP_MAX_RESPONSE_HEADER_SIZE_KEY =
   "hadoop.http.max.response.header.size";
   public static final int HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT = 65536;
+
+  public static final String HTTP_SOCKET_BACKLOG_SIZE_KEY =
+  "hadoop.http.socket.backlog.size";
+  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
@@ -433,6 +437,9 @@ public final class HttpServer2 implements FilterContainer {
   httpConfig.setResponseHeaderSize(responseHeaderSize);
   httpConfig.setSendServerVersion(false);
 
+  int backlogSize = conf.getInt(HTTP_SOCKET_BACKLOG_SIZE_KEY,
+  HTTP_SOCKET_BACKLOG_SIZE_DEFAULT);
+
   for (URI ep : endpoints) {
 final ServerConnector connector;
 String scheme = ep.getScheme();
@@ -448,6 +455,7 @@ public final class HttpServer2 implements FilterContainer {
 }
 connector.setHost(ep.getHost());
 connector.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
+connector.setAcceptQueueSize(backlogSize);
 server.addListener(connector);
   }
   server.loadListeners();
@@ -640,7 +648,6 @@ public final class HttpServer2 implements FilterContainer {
 
   private static void configureChannelConnector(ServerConnector c) {
 c.setIdleTimeout(1);
-c.setAcceptQueueSize(128);
 if(Shell.WINDOWS) {
   // result of setting the SO_REUSEADDR flag is different on Windows
   // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f04cb45/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index 6ec6e0f..ca7e466 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -682,4 +682,17 @@ public class TestHttpServer extends 
HttpServerFunctionalTest {
   stopHttpServer(myServer2);
 }
   }
+
+  @Test
+  public void testBacklogSize() throws Exception
+  {
+final int backlogSize = 2048;
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, backlogSize);
+HttpServer2 srv = createServer("test", conf);
+List<ServerConnector> listeners = (List<ServerConnector>) Whitebox.getInternalState(srv,
+"listeners");
+ServerConnector listener = (ServerConnector)listeners.get(0);
+assertEquals(backlogSize, listener.getAcceptQueueSize());
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[39/50] [abbrv] hadoop git commit: HADOOP-14769. WASB: delete recursive should not fail if a file is deleted. Contributed by Thomas Marquardt

2017-08-21 Thread aengineer
HADOOP-14769. WASB: delete recursive should not fail if a file is deleted.
Contributed by Thomas Marquardt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6b4e656
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6b4e656
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6b4e656

Branch: refs/heads/HDFS-7240
Commit: c6b4e656b76b68cc1d0dbcc15a5aa5ea23335b7b
Parents: 99e558b
Author: Steve Loughran 
Authored: Fri Aug 18 14:13:40 2017 +0100
Committer: Steve Loughran 
Committed: Fri Aug 18 14:13:40 2017 +0100

--
 .../fs/azure/AzureNativeFileSystemStore.java| 21 ---
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 47 ---
 .../TestFileSystemOperationsWithThreads.java| 61 
 3 files changed, 86 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6b4e656/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 554027b..b0cd701 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2459,8 +2459,11 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 try {
   blob.delete(operationContext, lease);
 } catch (StorageException e) {
-  LOG.error("Encountered Storage Exception for delete on Blob: {}, 
Exception Details: {} Error Code: {}",
-  blob.getUri(), e.getMessage(), e.getErrorCode());
+  if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
+LOG.error("Encountered Storage Exception for delete on Blob: {}"
++ ", Exception Details: {} Error Code: {}",
+blob.getUri(), e.getMessage(), e.getErrorCode());
+  }
   // On exception, check that if:
   // 1. It's a BlobNotFound exception AND
   // 2. It got there after one-or-more retries THEN
@@ -2491,17 +2494,17 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 // Container doesn't exist, no need to do anything
 return true;
   }
-
   // Get the blob reference and delete it.
   CloudBlobWrapper blob = getBlobReference(key);
-  if (blob.exists(getInstrumentedContext())) {
-safeDelete(blob, lease);
-return true;
-  } else {
+  safeDelete(blob, lease);
+  return true;
+} catch (Exception e) {
+  if (e instanceof StorageException
+  && NativeAzureFileSystemHelper.isFileNotFoundException(
+  (StorageException) e)) {
+// the file or directory does not exist
 return false;
   }
-} catch (Exception e) {
-  // Re-throw as an Azure storage exception.
   throw new AzureException(e);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6b4e656/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index a7558a3..2abc6c6 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2043,7 +2043,12 @@ public class NativeAzureFileSystem extends FileSystem {
   AzureFileSystemThreadTask task = new AzureFileSystemThreadTask() {
 @Override
 public boolean execute(FileMetadata file) throws IOException{
-  return deleteFile(file.getKey(), file.isDir());
+  if (!deleteFile(file.getKey(), file.isDir())) {
+LOG.warn("Attempt to delete non-existent {} {}",
+file.isDir() ? "directory" : "file",
+file.getKey());
+  }
+  return true;
 }
   };
 
@@ -2080,30 +2085,28 @@ public class NativeAzureFileSystem extends FileSystem {
 return new AzureFileSystemThreadPoolExecutor(threadCount, 
threadNamePrefix, operation, key, config);
   }
 
-  // Delete single file / directory from key.
+  /**
+   * Delete the specified file or directory and increment metrics.
+   * If the file or directory does not exist, the operation returns false.
+   * 

[01/50] [abbrv] hadoop git commit: YARN-6687. Validate that the duration of the periodic reservation is less than the periodicity. (subru via curino)

2017-08-21 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 7d132596d -> d0bd0f623


YARN-6687. Validate that the duration of the periodic reservation is less than 
the periodicity. (subru via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28d97b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28d97b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28d97b79

Branch: refs/heads/HDFS-7240
Commit: 28d97b79b69bb2be02d9320105e155eeed6f9e78
Parents: cc59b5f
Author: Carlo Curino 
Authored: Fri Aug 11 16:58:04 2017 -0700
Committer: Carlo Curino 
Committed: Fri Aug 11 16:58:04 2017 -0700

--
 .../reservation/ReservationInputValidator.java  | 18 ++--
 .../TestReservationInputValidator.java  | 93 
 2 files changed, 106 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
index 0e9a825..027d066 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
@@ -129,11 +129,12 @@ public class ReservationInputValidator {
   Resources.multiply(rr.getCapability(), rr.getConcurrency()));
 }
 // verify the allocation is possible (skip for ANY)
-if (contract.getDeadline() - contract.getArrival() < minDuration
+long duration = contract.getDeadline() - contract.getArrival();
+if (duration < minDuration
 && type != ReservationRequestInterpreter.R_ANY) {
   message =
   "The time difference ("
-  + (contract.getDeadline() - contract.getArrival())
+  + (duration)
   + ") between arrival (" + contract.getArrival() + ") "
   + "and deadline (" + contract.getDeadline() + ") must "
   + " be greater or equal to the minimum resource duration ("
@@ -158,15 +159,22 @@ public class ReservationInputValidator {
 // check that the recurrence is a positive long value.
 String recurrenceExpression = contract.getRecurrenceExpression();
 try {
-  Long recurrence = Long.parseLong(recurrenceExpression);
+  long recurrence = Long.parseLong(recurrenceExpression);
   if (recurrence < 0) {
 message = "Negative Period : " + recurrenceExpression + ". Please try"
-+ " again with a non-negative long value as period";
++ " again with a non-negative long value as period.";
+throw RPCUtil.getRemoteException(message);
+  }
+  // verify duration is less than recurrence for periodic reservations
+  if (recurrence > 0 && duration > recurrence) {
+message = "Duration of the requested reservation: " + duration
++ " is greater than the recurrence: " + recurrence
++ ". Please try again with a smaller duration.";
 throw RPCUtil.getRemoteException(message);
   }
 } catch (NumberFormatException e) {
   message = "Invalid period " + recurrenceExpression + ". Please try"
-  + " again with a non-negative long value as period";
+  + " again with a non-negative long value as period.";
   throw RPCUtil.getRemoteException(message);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
 

[19/50] [abbrv] hadoop git commit: HDFS-12054. FSNamesystem#addErasureCodingPolicies should call checkNameNodeSafeMode() to ensure Namenode is not in safemode. Contributed by lufei.

2017-08-21 Thread aengineer
HDFS-12054. FSNamesystem#addErasureCodingPolicies should call 
checkNameNodeSafeMode() to ensure Namenode is not in safemode. Contributed by 
lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1040bae6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1040bae6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1040bae6

Branch: refs/heads/HDFS-7240
Commit: 1040bae6fcbae7079d8126368cdeac60831a4d0c
Parents: 2e43c28
Author: Wei-Chiu Chuang 
Authored: Tue Aug 15 07:38:43 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 15 07:38:43 2017 -0700

--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  2 ++
 .../java/org/apache/hadoop/hdfs/TestSafeMode.java   | 16 
 2 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040bae6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b1639b2..caf73f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7081,6 +7081,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   checkOperation(OperationCategory.WRITE);
   for (ErasureCodingPolicy policy : policies) {
 try {
+  checkOperation(OperationCategory.WRITE);
+  checkNameNodeSafeMode("Cannot add erasure coding policy");
   ErasureCodingPolicy newPolicy =
   FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
   addECPolicyName = newPolicy.getName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040bae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index f03b440..bc95ec7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -466,6 +468,20 @@ public class TestSafeMode {
   // expected
 }
 
+ECSchema toAddSchema = new ECSchema("testcodec", 3, 2);
+ErasureCodingPolicy newPolicy =
+new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+ErasureCodingPolicy[] policyArray =
+new ErasureCodingPolicy[]{newPolicy};
+try {
+  dfs.addErasureCodingPolicies(policyArray);
+  fail("AddErasureCodingPolicies should have failed.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
+  "Cannot add erasure coding policy", ioe);
+  // expected
+}
+
 assertFalse("Could not leave SM",
 dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent constructor. Contributed by Oleg Danilov

2017-08-21 Thread aengineer
MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent 
constructor. Contributed by Oleg Danilov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0acc5e00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0acc5e00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0acc5e00

Branch: refs/heads/HDFS-7240
Commit: 0acc5e00362602f027524637a86ca1bf80982986
Parents: de462da
Author: Jason Lowe 
Authored: Wed Aug 16 16:34:06 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 16:34:06 2017 -0500

--
 .../TaskAttemptUnsuccessfulCompletionEvent.java | 28 ++--
 1 file changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0acc5e00/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
index 1732d91..1752967 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
@@ -60,7 +60,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   int[] physMemKbytes;
   private static final Counters EMPTY_COUNTERS = new Counters();
 
-  /** 
+  /**
* Create an event to record the unsuccessful completion of attempts
* @param id Attempt ID
* @param taskType Type of the task
@@ -74,7 +74,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
* @param allSplits the "splits", or a pixelated graph of various
*measurable worker node state variables against progress.
*Currently there are four; wallclock time, CPU time,
-   *virtual memory and physical memory.  
+   *virtual memory and physical memory.
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
@@ -101,7 +101,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
   }
 
-  /** 
+  /**
* @deprecated please use the constructor with an additional
*  argument, an array of splits arrays instead.  See
*  {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
@@ -117,19 +117,19 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
-String status, long finishTime, 
+String status, long finishTime,
 String hostname, String error) {
 this(id, taskType, status, finishTime, hostname, -1, "",
 error, EMPTY_COUNTERS, null);
   }
-  
+
   public TaskAttemptUnsuccessfulCompletionEvent
   (TaskAttemptID id, TaskType taskType,
String status, long finishTime,
String hostname, int port, String rackName,
String error, int[][] allSplits) {
 this(id, taskType, status, finishTime, hostname, port,
-rackName, error, EMPTY_COUNTERS, null);
+rackName, error, EMPTY_COUNTERS, allSplits);
   }
 
   TaskAttemptUnsuccessfulCompletionEvent() {}
@@ -162,9 +162,9 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 }
 return datum;
   }
-  
-  
-  
+
+
+
   public void setDatum(Object odatum) {
 this.datum =
 (TaskAttemptUnsuccessfulCompletion)odatum;
@@ -208,12 +208,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   public String getHostname() { return hostname; }
   /** Get the rpc port for the host where the attempt executed */
   public int getPort() { return port; }
-  
+
   /** Get the rack name of the node where the attempt ran */
   public String getRackName() {
 return rackName == null ? null : rackName.toString();
   }
-  
+
   /** Get the error string */
   public String getError() { return error.toString(); }
   /** Get the task status */
@@ -224,12 +224,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements 

[22/50] [abbrv] hadoop git commit: YARN-7014. Fix off-by-one error causing heap corruption (Jason Lowe via nroberts)

2017-08-21 Thread aengineer
YARN-7014. Fix off-by-one error causing heap corruption (Jason Lowe via 
nroberts)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2654590
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2654590
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2654590

Branch: refs/heads/HDFS-7240
Commit: d265459024b8e5f5eccf421627f684ca8f162112
Parents: dadb0c2
Author: Nathan Roberts 
Authored: Tue Aug 15 15:52:48 2017 -0500
Committer: Nathan Roberts 
Committed: Tue Aug 15 15:52:48 2017 -0500

--
 .../src/main/native/container-executor/impl/utils/string-utils.c  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2654590/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
index 703d484..063df7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
@@ -44,8 +44,7 @@ int validate_container_id(const char* input) {
* container_e17_1410901177871_0001_01_05
* container_1410901177871_0001_01_05
*/
-  char* input_cpy = malloc(strlen(input));
-  strcpy(input_cpy, input);
+  char* input_cpy = strdup(input);
   char* p = strtok(input_cpy, "_");
   int idx = 0;
   while (p != NULL) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/50] [abbrv] hadoop git commit: YARN-6987. Log app attempt during InvalidStateTransition. Contributed by Jonathan Eagles

2017-08-21 Thread aengineer
YARN-6987. Log app attempt during InvalidStateTransition. Contributed by 
Jonathan Eagles


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3325ef65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3325ef65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3325ef65

Branch: refs/heads/HDFS-7240
Commit: 3325ef653d6f364a82dd32485d9ef6d987380ce3
Parents: 6b09c32
Author: Jason Lowe 
Authored: Mon Aug 14 14:40:08 2017 -0500
Committer: Jason Lowe 
Committed: Mon Aug 14 14:40:08 2017 -0500

--
 .../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java   | 3 ++-
 .../server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java| 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3325ef65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index fa2f20c..03be793 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -888,7 +888,8 @@ public class RMAppImpl implements RMApp, Recoverable {
 /* keep the master in sync with the state machine */
 this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.error("Can't handle this event at current state", e);
+LOG.error("App: " + appID
++ " can't handle this event at current state", e);
 /* TODO fail the application on the failed transition */
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3325ef65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 254768b..7d453bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -911,7 +911,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 /* keep the master in sync with the state machine */
 this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.error("Can't handle this event at current state", e);
+LOG.error("App attempt: " + appAttemptID
++ " can't handle this event at current state", e);
 /* TODO fail the application on the failed transition */
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from hadoop-mapreduce-client-common (haibochen via rkanter)

2017-08-21 Thread aengineer
MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from 
hadoop-mapreduce-client-common (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab051bd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab051bd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab051bd4

Branch: refs/heads/HDFS-7240
Commit: ab051bd42ee1d7c4d3b7cc71e6b2734a0955e767
Parents: 0acc5e0
Author: Robert Kanter 
Authored: Wed Aug 16 16:14:04 2017 -0700
Committer: Robert Kanter 
Committed: Wed Aug 16 16:14:04 2017 -0700

--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab051bd4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index db8ae49..b88b012 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -46,10 +46,6 @@
   org.apache.hadoop
   hadoop-mapreduce-client-core
 
-
-  org.apache.hadoop
-  hadoop-yarn-server-common
-
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] [abbrv] hadoop git commit: HADOOP-14773. Extend ZKCuratorManager API for more reusability. (Íñigo Goiri via Subru).

2017-08-21 Thread aengineer
HADOOP-14773. Extend ZKCuratorManager API for more reusability. (Íñigo Goiri 
via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75dd866b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75dd866b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75dd866b

Branch: refs/heads/HDFS-7240
Commit: 75dd866bfb8b63cb9f13179d4365b05c48e0907d
Parents: f34646d
Author: Subru Krishnan 
Authored: Tue Aug 15 16:53:59 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Aug 15 16:53:59 2017 -0700

--
 .../hadoop/util/curator/ZKCuratorManager.java   | 54 ++--
 .../util/curator/TestZKCuratorManager.java  |  2 +-
 .../recovery/ZKRMStateStore.java| 19 +--
 3 files changed, 52 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75dd866b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
index 3adf028..9a031af 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
@@ -33,9 +33,12 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Helper class that provides utility methods specific to ZK operations.
  */
@@ -179,7 +182,6 @@ public final class ZKCuratorManager {
   /**
* Get the data in a ZNode.
* @param path Path of the ZNode.
-   * @param stat Output statistics of the ZNode.
* @return The data in the ZNode.
* @throws Exception If it cannot contact Zookeeper.
*/
@@ -190,16 +192,38 @@ public final class ZKCuratorManager {
   /**
* Get the data in a ZNode.
* @param path Path of the ZNode.
-   * @param stat Output statistics of the ZNode.
+   * @param stat
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public byte[] getData(final String path, Stat stat) throws Exception {
+return curator.getData().storingStatIn(stat).forPath(path);
+  }
+
+  /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
* @return The data in the ZNode.
* @throws Exception If it cannot contact Zookeeper.
*/
-  public String getSringData(final String path) throws Exception {
+  public String getStringData(final String path) throws Exception {
 byte[] bytes = getData(path);
 return new String(bytes, Charset.forName("UTF-8"));
   }
 
   /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
+   * @param stat Output statistics of the ZNode.
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public String getStringData(final String path, Stat stat) throws Exception {
+byte[] bytes = getData(path, stat);
+return new String(bytes, Charset.forName("UTF-8"));
+  }
+
+  /**
* Set data into a ZNode.
* @param path Path of the ZNode.
* @param data Data to set.
@@ -272,14 +296,36 @@ public final class ZKCuratorManager {
   }
 
   /**
+   * Utility function to ensure that the configured base znode exists.
+   * This recursively creates the znode as well as all of its parents.
+   * @param path Path of the znode to create.
+   * @throws Exception If it cannot create the file.
+   */
+  public void createRootDirRecursively(String path) throws Exception {
+String[] pathParts = path.split("/");
+Preconditions.checkArgument(
+pathParts.length >= 1 && pathParts[0].isEmpty(),
+"Invalid path: %s", path);
+StringBuilder sb = new StringBuilder();
+
+for (int i = 1; i < pathParts.length; i++) {
+  sb.append("/").append(pathParts[i]);
+  create(sb.toString());
+}
+  }
+
+  /**
* Delete a ZNode.
* @param path Path of the ZNode.
+   * @return If the znode was deleted.
* @throws Exception If it cannot contact ZooKeeper.
*/
-  public void delete(final String path) throws Exception {
+  public boolean delete(final String path) throws Exception {
 if (exists(path)) {
   curator.delete().deletingChildrenIfNeeded().forPath(path);
+  return true;
 }
+return false;
   }
 
   /**


[42/50] [abbrv] hadoop git commit: YARN-6969. Clean up unused code in class FairSchedulerQueueInfo. (Larry Lo via Yufei Gu)

2017-08-21 Thread aengineer
YARN-6969. Clean up unused code in class FairSchedulerQueueInfo. (Larry Lo via 
Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8991f0ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8991f0ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8991f0ba

Branch: refs/heads/HDFS-7240
Commit: 8991f0baec62625c45144e2544066195800ab95b
Parents: 2d105a2
Author: Yufei Gu 
Authored: Fri Aug 18 14:38:44 2017 -0700
Committer: Yufei Gu 
Committed: Fri Aug 18 14:38:44 2017 -0700

--
 .../webapp/dao/FairSchedulerQueueInfo.java | 17 -
 1 file changed, 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8991f0ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index 79339c7..913513c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -48,8 +48,6 @@ public class FairSchedulerQueueInfo {
   @XmlTransient
   private float fractionMemFairShare;
   @XmlTransient
-  private float fractionMemMinShare;
-  @XmlTransient
   private float fractionMemMaxShare;
   
   private ResourceInfo minResources;
@@ -63,7 +61,6 @@ public class FairSchedulerQueueInfo {
   private ResourceInfo clusterResources;
   private ResourceInfo reservedResources;
 
-  private long pendingContainers;
   private long allocatedContainers;
   private long reservedContainers;
 
@@ -108,12 +105,10 @@ public class FairSchedulerQueueInfo {
 (float)steadyFairResources.getMemorySize() / 
clusterResources.getMemorySize();
 fractionMemFairShare = (float) fairResources.getMemorySize()
 / clusterResources.getMemorySize();
-fractionMemMinShare = (float)minResources.getMemorySize() / 
clusterResources.getMemorySize();
 fractionMemMaxShare = (float)maxResources.getMemorySize() / 
clusterResources.getMemorySize();
 
 maxApps = queue.getMaxRunningApps();
 
-pendingContainers = queue.getMetrics().getPendingContainers();
 allocatedContainers = queue.getMetrics().getAllocatedContainers();
 reservedContainers = queue.getMetrics().getReservedContainers();
 
@@ -126,10 +121,6 @@ public class FairSchedulerQueueInfo {
 childQueues = getChildQueues(queue, scheduler);
   }
 
-  public long getPendingContainers() {
-return pendingContainers;
-  }
-
   public long getAllocatedContainers() {
 return allocatedContainers;
   }
@@ -234,14 +225,6 @@ public class FairSchedulerQueueInfo {
   }
 
   /**
-   * Returns the queue's min share in as a fraction of the entire
-   * cluster capacity.
-   */
-  public float getMinShareMemoryFraction() {
-return fractionMemMinShare;
-  }
-  
-  /**
* Returns the memory used by this queue as a fraction of the entire 
* cluster capacity.
*/


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] [abbrv] hadoop git commit: HADOOP-14194. Aliyun OSS should not use empty endpoint as default. Contributed by Genmao Yu

2017-08-21 Thread aengineer
HADOOP-14194. Aliyun OSS should not use empty endpoint as default. Contributed 
by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/267e19a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/267e19a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/267e19a0

Branch: refs/heads/HDFS-7240
Commit: 267e19a09f366a965b30c8d4dc75e377b0d92fff
Parents: 7a82d7b
Author: Kai Zheng 
Authored: Mon Aug 21 13:36:28 2017 +0800
Committer: Kai Zheng 
Committed: Mon Aug 21 13:36:28 2017 +0800

--
 .../apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/267e19a0/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index a944fc1..a85a739 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -129,6 +129,10 @@ public class AliyunOSSFileSystemStore {
 }
 
 String endPoint = conf.getTrimmed(ENDPOINT_KEY, "");
+if (StringUtils.isEmpty(endPoint)) {
+  throw new IllegalArgumentException("Aliyun OSS endpoint should not be " +
+"null or empty. Please set proper endpoint with 'fs.oss.endpoint'.");
+}
 CredentialsProvider provider =
 AliyunOSSUtils.getCredentialsProvider(conf);
 ossClient = new OSSClient(endPoint, provider, clientConf);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: Revert "HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure durations. Contributed by Hanisha Koneru."

2017-08-21 Thread aengineer
Revert "HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure 
durations. Contributed by Hanisha Koneru."

This reverts commit 8bef4eca28a3466707cc4ea0de0330449319a5eb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d105a20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d105a20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d105a20

Branch: refs/heads/HDFS-7240
Commit: 2d105a206884b62ccdba61f2de3e2fe65fc43074
Parents: e05fa34
Author: Arpit Agarwal 
Authored: Fri Aug 18 10:15:52 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Aug 18 10:15:52 2017 -0700

--
 .../java/org/apache/hadoop/ipc/ProtobufRpcEngine.java | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d105a20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 2c0cfe5..639bbad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -190,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = Time.monotonicNow();
+startTime = Time.now();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -245,7 +245,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = Time.monotonicNow() - startTime;
+long callTime = Time.now() - startTime;
 LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
   }
   
@@ -373,19 +373,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 this.server = currentCallInfo.get().server;
 this.call = Server.getCurCall().get();
 this.methodName = currentCallInfo.get().methodName;
-this.setupTime = Time.monotonicNow();
+this.setupTime = Time.now();
   }
 
   @Override
   public void setResponse(Message message) {
-long processingTime = Time.monotonicNow() - setupTime;
+long processingTime = Time.now() - setupTime;
 call.setDeferredResponse(RpcWritable.wrap(message));
 server.updateDeferredMetrics(methodName, processingTime);
   }
 
   @Override
   public void error(Throwable t) {
-long processingTime = Time.monotonicNow() - setupTime;
+long processingTime = Time.now() - setupTime;
 String detailedMetricsName = t.getClass().getSimpleName();
 server.updateDeferredMetrics(detailedMetricsName, processingTime);
 call.setDeferredError(t);
@@ -513,7 +513,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 Message param = request.getValue(prototype);
 
 Message result;
-long startTime = Time.monotonicNow();
+long startTime = Time.now();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
 boolean isDeferred = false;
@@ -537,7 +537,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   throw e;
 } finally {
   currentCallInfo.set(null);
-  int processingTime = (int) (Time.monotonicNow() - startTime);
+  int processingTime = (int) (Time.now() - startTime);
   if (LOG.isDebugEnabled()) {
 String msg =
 "Served: " + methodName + (isDeferred ? ", deferred" : "") +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7043. Cleanup ResourceProfileManager. (wangda)

2017-08-21 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3926 f461a2d42 -> 7d9f92cf0


YARN-7043. Cleanup ResourceProfileManager. (wangda)

Change-Id: I463356f37bf1f6a3f1fc3c594c79916e8c0ab913


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d9f92cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d9f92cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d9f92cf

Branch: refs/heads/YARN-3926
Commit: 7d9f92cf0f35f542256480a8adc14fe89b10ec99
Parents: f461a2d
Author: Wangda Tan 
Authored: Mon Aug 21 17:20:06 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 17:20:06 2017 -0700

--
 .../ams/ApplicationMasterServiceProcessor.java  |  8 ++-
 .../yarn/api/ApplicationClientProtocol.java | 11 ++--
 .../YARNFeatureNotEnabledException.java | 45 +++
 .../yarn/util/resource/ResourceUtils.java   | 21 +++
 .../hadoop/yarn/client/api/YarnClient.java  | 13 +++--
 .../resource/DominantResourceCalculator.java|  6 +-
 .../hadoop/yarn/api/TestPBImplRecords.java  |  1 +
 .../resourcemanager/AMSProcessingChain.java |  2 +-
 .../server/resourcemanager/ClientRMService.java | 26 ++---
 .../resourcemanager/DefaultAMSProcessor.java| 12 ++--
 ...pportunisticContainerAllocatorAMService.java |  3 +-
 .../server/resourcemanager/RMServerUtils.java   | 14 -
 .../resource/ResourceProfilesManager.java   | 32 +-
 .../resource/ResourceProfilesManagerImpl.java   | 61 +---
 .../scheduler/AbstractYarnScheduler.java| 18 +-
 .../TestApplicationMasterService.java   | 11 ++--
 .../resource/TestResourceProfiles.java  | 10 +++-
 17 files changed, 191 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d9f92cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
index b7d925a..8e76a11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
@@ -52,11 +52,13 @@ public interface ApplicationMasterServiceProcessor {
* @param request Register Request.
* @param response Register Response.
* @throws IOException IOException.
+   * @throws YarnException in critical situation where invalid
+   * profiles/resources are added.
*/
-  void registerApplicationMaster(
-  ApplicationAttemptId applicationAttemptId,
+  void registerApplicationMaster(ApplicationAttemptId applicationAttemptId,
   RegisterApplicationMasterRequest request,
-  RegisterApplicationMasterResponse response) throws IOException;
+  RegisterApplicationMasterResponse response)
+  throws IOException, YarnException;
 
   /**
* Allocate call.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d9f92cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
index e745697..8456a8e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 
 /**
  * The protocol between clients and the ResourceManager
@@ -604,7 +605,8 @@ public interface ApplicationClientProtocol extends 
ApplicationBaseProtocol {
* @param request request to get all the resource profiles
* @return Response containing a map of the profile name to Resource
* capabilities
-   * @throws YarnException if resource 

[12/45] hadoop git commit: HADOOP-14194. Aliyun OSS should not use empty endpoint as default. Contributed by Genmao Yu

2017-08-21 Thread wangda
HADOOP-14194. Aliyun OSS should not use empty endpoint as default. Contributed 
by Genmao Yu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/267e19a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/267e19a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/267e19a0

Branch: refs/heads/YARN-3926
Commit: 267e19a09f366a965b30c8d4dc75e377b0d92fff
Parents: 7a82d7b
Author: Kai Zheng 
Authored: Mon Aug 21 13:36:28 2017 +0800
Committer: Kai Zheng 
Committed: Mon Aug 21 13:36:28 2017 +0800

--
 .../apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/267e19a0/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index a944fc1..a85a739 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -129,6 +129,10 @@ public class AliyunOSSFileSystemStore {
 }
 
 String endPoint = conf.getTrimmed(ENDPOINT_KEY, "");
+if (StringUtils.isEmpty(endPoint)) {
+  throw new IllegalArgumentException("Aliyun OSS endpoint should not be " +
+"null or empty. Please set proper endpoint with 'fs.oss.endpoint'.");
+}
 CredentialsProvider provider =
 AliyunOSSUtils.getCredentialsProvider(conf);
 ossClient = new OSSClient(endPoint, provider, clientConf);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/45] hadoop git commit: YARN-6852. Native code changes to support isolate GPU devices by using CGroups. (wangda)

2017-08-21 Thread wangda
YARN-6852. Native code changes to support isolate GPU devices by using CGroups. 
(wangda)

Change-Id: I4869cc4d8ad539539ccba4bea5a178cacdb741ab


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/436c2638
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/436c2638
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/436c2638

Branch: refs/heads/YARN-3926
Commit: 436c2638f9ca1fb8de6a630cb5e91d956ac75216
Parents: 8991f0b
Author: Wangda Tan 
Authored: Fri Aug 18 18:26:36 2017 -0700
Committer: Wangda Tan 
Committed: Fri Aug 18 18:26:36 2017 -0700

--
 .../src/CMakeLists.txt  |  12 +-
 .../impl/container-executor.c   |  10 +-
 .../impl/container-executor.h   |   2 +
 .../main/native/container-executor/impl/main.c  |  13 +-
 .../impl/modules/cgroups/cgroups-operations.c   | 161 +
 .../impl/modules/cgroups/cgroups-operations.h   |  55 +
 .../impl/modules/common/constants.h |  29 +++
 .../impl/modules/common/module-configs.c|  41 
 .../impl/modules/common/module-configs.h|  33 +++
 .../impl/modules/gpu/gpu-module.c   | 229 +++
 .../impl/modules/gpu/gpu-module.h   |  45 
 .../container-executor/impl/utils/path-utils.c  |  52 +
 .../container-executor/impl/utils/path-utils.h  |  35 +++
 .../impl/utils/string-utils.c   | 106 +++--
 .../impl/utils/string-utils.h   |   7 +-
 .../test/modules/cgroups/test-cgroups-module.cc | 121 ++
 .../test/modules/gpu/test-gpu-module.cc | 203 
 .../test/test-container-executor-common.h   |  36 +++
 .../test/test-container-executor.c  |  23 +-
 .../native/container-executor/test/test_main.cc |  11 +-
 .../test/utils/test-path-utils.cc   |  67 ++
 .../test/utils/test-string-utils.cc |  93 
 22 files changed, 1338 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/436c2638/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 100d7ca..07c29bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -101,6 +101,10 @@ add_library(container
 main/native/container-executor/impl/container-executor.c
 main/native/container-executor/impl/get_executable.c
 main/native/container-executor/impl/utils/string-utils.c
+main/native/container-executor/impl/utils/path-utils.c
+main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
+main/native/container-executor/impl/modules/common/module-configs.c
+main/native/container-executor/impl/modules/gpu/gpu-module.c
 )
 
 add_executable(container-executor
@@ -113,12 +117,14 @@ target_link_libraries(container-executor
 
 output_directory(container-executor target/usr/local/bin)
 
+# Test cases
 add_executable(test-container-executor
 main/native/container-executor/test/test-container-executor.c
 )
 target_link_libraries(test-container-executor
 container ${EXTRA_LIBS}
 )
+
 output_directory(test-container-executor target/usr/local/bin)
 
 # unit tests for container executor
@@ -126,6 +132,10 @@ add_executable(cetest
 main/native/container-executor/impl/util.c
 main/native/container-executor/test/test_configuration.cc
 main/native/container-executor/test/test_main.cc
+main/native/container-executor/test/utils/test-string-utils.cc
+main/native/container-executor/test/utils/test-path-utils.cc
+
main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc
+main/native/container-executor/test/modules/gpu/test-gpu-module.cc
 main/native/container-executor/test/test_util.cc)
-target_link_libraries(cetest gtest)
+target_link_libraries(cetest gtest container)
 output_directory(cetest test)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/436c2638/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 

[30/45] hadoop git commit: YARN-6232. Update resource usage and preempted resource calculations to take into account all resource types. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb5a99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
index cd04264..47e517f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
@@ -36,6 +36,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
@@ -98,15 +99,12 @@ public class RMAppBlock extends AppBlock{
   attemptResourcePreempted)
 .__("Number of Non-AM Containers Preempted from Current Attempt:",
   attemptNumNonAMContainerPreempted)
-.__("Aggregate Resource Allocation:",
-  String.format("%d MB-seconds, %d vcore-seconds",
-  appMetrics == null ? "N/A" : appMetrics.getMemorySeconds(),
-  appMetrics == null ? "N/A" : appMetrics.getVcoreSeconds()))
+.__("Aggregate Resource Allocation:", appMetrics == null ? "N/A" :
+StringHelper
+.getResourceSecondsString(appMetrics.getResourceSecondsMap()))
 .__("Aggregate Preempted Resource Allocation:",
-  String.format("%d MB-seconds, %d vcore-seconds",
-appMetrics == null ? "N/A" : 
appMetrics.getPreemptedMemorySeconds(),
-appMetrics == null ? "N/A" :
-appMetrics.getPreemptedVcoreSeconds()));
+appMetrics == null ? "N/A" : StringHelper.getResourceSecondsString(
+appMetrics.getPreemptedResourceSecondsMap()));
 
 pdiv.__();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb5a99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index f11939a..6036fb5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -100,6 +100,7 @@ public class AppInfo {
   protected long vcoreSeconds;
   protected float queueUsagePercentage;
   protected float clusterUsagePercentage;
+  protected Map resourceSecondsMap;
 
   // preemption info fields
   protected long preemptedResourceMB;
@@ -108,6 +109,7 @@ public class AppInfo {
   protected int numAMContainerPreempted;
   private long preemptedMemorySeconds;
   private long preemptedVcoreSeconds;
+  protected Map preemptedResourceSecondsMap;
 
   // list of resource requests
   @XmlElement(name = "resourceRequests")
@@ -238,8 +240,10 @@ public class AppInfo {
   appMetrics.getResourcePreempted().getVirtualCores();
   memorySeconds = appMetrics.getMemorySeconds();
   vcoreSeconds = appMetrics.getVcoreSeconds();
+  resourceSecondsMap = appMetrics.getResourceSecondsMap();
   preemptedMemorySeconds = appMetrics.getPreemptedMemorySeconds();
   preemptedVcoreSeconds = appMetrics.getPreemptedVcoreSeconds();
+  preemptedResourceSecondsMap = 
appMetrics.getPreemptedResourceSecondsMap();
   ApplicationSubmissionContext appSubmissionContext =
   app.getApplicationSubmissionContext();
   unmanagedApplication =
@@ -432,7 +436,7 @@ 

[35/45] hadoop git commit: YARN-7042. Clean up unit tests after YARN-6610. (Daniel Templeton via wangda)

2017-08-21 Thread wangda
YARN-7042. Clean up unit tests after YARN-6610. (Daniel Templeton via wangda)

Change-Id: I8e40f704b6fcdd5b14faa9548a27986501044fa1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b073c6f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b073c6f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b073c6f1

Branch: refs/heads/YARN-3926
Commit: b073c6f155105f2a8d1bc47a087fc6f745fbc2f5
Parents: 9d6ca4b
Author: Wangda Tan 
Authored: Thu Aug 17 11:18:08 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../resource/DominantResourceCalculator.java|  2 +-
 .../util/resource/TestResourceCalculator.java   | 95 ++--
 2 files changed, 49 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b073c6f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 40b38b9..1e99bc7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -126,7 +126,7 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 diff = max[0] - max[1];
   } else if (clusterRes.length == 2) {
 // Special case to handle the common scenario of only CPU and memory
-// so the we can optimize for performance
+// so that we can optimize for performance
 diff = calculateSharesForMandatoryResources(clusterRes, lhs, rhs,
 lhsShares, rhsShares);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b073c6f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
index 19e7f8d..5b4155c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
@@ -24,7 +24,7 @@ import java.util.Collection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -44,13 +44,18 @@ public class TestResourceCalculator {
 { new DominantResourceCalculator() } });
   }
 
-  @BeforeClass
-  public static void setup() {
+  @Before
+  public void setupNoExtraResource() {
+// This has to run before each test because we don't know when
+// setupExtraResource() might be called
+ResourceUtils.resetResourceTypes(new Configuration());
+  }
+
+  private static void setupExtraResource() {
 Configuration conf = new Configuration();
 
 conf.set(YarnConfiguration.RESOURCE_TYPES, "test");
 ResourceUtils.resetResourceTypes(conf);
-ResourceUtils.getResourceTypes();
   }
 
   public TestResourceCalculator(ResourceCalculator rs) {
@@ -86,9 +91,15 @@ public class TestResourceCalculator {
 }
   }
 
-  private Resource newResource(long memory, int cpu, int test) {
+  private Resource newResource(long memory, int cpu) {
 Resource res = Resource.newInstance(memory, cpu);
 
+return res;
+  }
+
+  private Resource newResource(long memory, int cpu, int test) {
+Resource res = newResource(memory, cpu);
+
 res.setResourceValue("test", test);
 
 return res;
@@ -123,28 +134,48 @@ public class TestResourceCalculator {
   }
 
   @Test
-  public void testCompare2() {
+  public void testCompareWithOnlyMandatory() {
+// This test is necessary because there are optimizations that are only
+// triggered when only the mandatory resources are configured.
+
 // Keep cluster 

[17/45] hadoop git commit: YARN-4172. Extend DominantResourceCalculator to account for all resources. (Varun Vasudev via wangda)

2017-08-21 Thread wangda
YARN-4172. Extend DominantResourceCalculator to account for all resources. 
(Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88026b2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88026b2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88026b2a

Branch: refs/heads/YARN-3926
Commit: 88026b2aba1e6c745baf47f40fc236294b488260
Parents: 64439e4
Author: Wangda Tan 
Authored: Fri Jan 29 10:53:31 2016 +0800
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:54 2017 -0700

--
 .../resource/DominantResourceCalculator.java| 380 +--
 1 file changed, 273 insertions(+), 107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88026b2a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 7697e1d..a94e7a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -22,25 +22,31 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+
+import java.util.HashSet;
+import java.util.Set;
 
 /**
- * A {@link ResourceCalculator} which uses the concept of  
+ * A {@link ResourceCalculator} which uses the concept of
  * dominant resource to compare multi-dimensional resources.
  *
- * Essentially the idea is that the in a multi-resource environment, 
- * the resource allocation should be determined by the dominant share 
- * of an entity (user or queue), which is the maximum share that the 
- * entity has been allocated of any resource. 
- * 
- * In a nutshell, it seeks to maximize the minimum dominant share across 
- * all entities. 
- * 
+ * Essentially the idea is that the in a multi-resource environment,
+ * the resource allocation should be determined by the dominant share
+ * of an entity (user or queue), which is the maximum share that the
+ * entity has been allocated of any resource.
+ *
+ * In a nutshell, it seeks to maximize the minimum dominant share across
+ * all entities.
+ *
  * For example, if user A runs CPU-heavy tasks and user B runs
- * memory-heavy tasks, it attempts to equalize CPU share of user A 
- * with Memory-share of user B. 
- * 
+ * memory-heavy tasks, it attempts to equalize CPU share of user A
+ * with Memory-share of user B.
+ *
  * In the single resource case, it reduces to max-min fairness for that 
resource.
- * 
+ *
  * See the Dominant Resource Fairness paper for more details:
  * www.cs.berkeley.edu/~matei/papers/2011/nsdi_drf.pdf
  */
@@ -50,6 +56,56 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   private static final Log LOG =
   LogFactory.getLog(DominantResourceCalculator.class);
 
+
+  private Set resourceNames;
+
+  public DominantResourceCalculator() {
+resourceNames = new HashSet<>();
+resourceNames.add(ResourceInformation.MEMORY.getName());
+resourceNames.add(ResourceInformation.VCORES.getName());
+  }
+
+  /**
+   * Compare two resources - if the value for every resource type for the lhs
+   * is greater than that of the rhs, return 1. If the value for every resource
+   * type in the lhs is less than the rhs, return -1. Otherwise, return 0
+   *
+   * @param lhs resource to be compared
+   * @param rhs resource to be compared
+   * @return 0, 1, or -1
+   */
+  private int compare(Resource lhs, Resource rhs) {
+boolean lhsGreater = false;
+boolean rhsGreater = false;
+int ret = 0;
+
+for (String rName : resourceNames) {
+  try {
+ResourceInformation lhsResourceInformation =
+lhs.getResourceInformation(rName);
+ResourceInformation rhsResourceInformation =
+rhs.getResourceInformation(rName);
+int diff = lhsResourceInformation.compareTo(rhsResourceInformation);
+if (diff >= 1) {
+  lhsGreater = true;
+} 

[32/45] hadoop git commit: YARN-6445. [YARN-3926] Performance improvements in resource profile branch with respect to SLS. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-6445. [YARN-3926] Performance improvements in resource profile branch with 
respect to SLS. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1c16576
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1c16576
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1c16576

Branch: refs/heads/YARN-3926
Commit: f1c16576716206700c6056a7aa9201d996de5c7f
Parents: 5fb5a99
Author: Sunil G 
Authored: Tue Apr 25 11:53:11 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:01 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   | 19 +--
 .../yarn/api/records/ResourceInformation.java   | 51 ++---
 .../hadoop/yarn/util/UnitsConversionUtil.java   | 34 ++-
 .../yarn/conf/TestResourceInformation.java  |  4 +-
 .../yarn/util/TestUnitsConversionUtil.java  | 60 ++--
 .../api/records/impl/pb/ResourcePBImpl.java | 23 
 .../resource/DominantResourceCalculator.java| 54 --
 .../hadoop/yarn/util/resource/Resources.java| 18 +++---
 .../yarn/util/resource/TestResourceUtils.java   |  1 +
 .../yarn/util/resource/TestResources.java   | 12 +++-
 .../resource/ResourceProfilesManagerImpl.java   |  3 +-
 11 files changed, 157 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1c16576/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index c349a32..4356986 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -107,12 +107,23 @@ public abstract class Resource implements 
Comparable {
   @InterfaceStability.Unstable
   public static Resource newInstance(Resource resource) {
 Resource ret = Resource.newInstance(0, 0);
-for (Map.Entry entry : resource.getResources()
+Resource.copy(resource, ret);
+return ret;
+  }
+
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public static void copy(Resource source, Resource dest) {
+for (Map.Entry entry : source.getResources()
 .entrySet()) {
-  ret.setResourceInformation(entry.getKey(),
-  ResourceInformation.newInstance(entry.getValue()));
+  try {
+ResourceInformation.copy(entry.getValue(),
+dest.getResourceInformation(entry.getKey()));
+  } catch (YarnException ye) {
+dest.setResourceInformation(entry.getKey(),
+ResourceInformation.newInstance(entry.getValue()));
+  }
 }
-return ret;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1c16576/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 7d74efc..d75b441 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -30,9 +30,9 @@ public class ResourceInformation implements 
Comparable {
   private String name;
   private String units;
   private ResourceTypes resourceType;
-  private Long value;
-  private Long minimumAllocation;
-  private Long maximumAllocation;
+  private long value;
+  private long minimumAllocation;
+  private long maximumAllocation;
 
   private static final String MEMORY_URI = "memory-mb";
   private static final String VCORES_URI = "vcores";
@@ -106,7 +106,7 @@ public class ResourceInformation implements 
Comparable {
*
* @return the resource value
*/
-  public Long getValue() {
+  public long getValue() {
 return value;
   }
 
@@ -115,7 +115,7 @@ public class ResourceInformation implements 
Comparable {
*
* @param rValue the resource value
*/
-  public void setValue(Long rValue) {
+  public void setValue(long rValue) {

[36/45] hadoop git commit: YARN-6761. Fix build for YARN-3926 branch. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-6761. Fix build for YARN-3926 branch. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e757abde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e757abde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e757abde

Branch: refs/heads/YARN-3926
Commit: e757abde95142df05aa8f6ebfbca63ec53273da0
Parents: f1c1657
Author: Sunil G 
Authored: Mon Jul 10 09:21:26 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   | 112 +++
 .../resource/DominantResourceCalculator.java|   1 +
 2 files changed, 90 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e757abde/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 4356986..9a8e2ec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -29,6 +29,8 @@ import 
org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 
 /**
@@ -58,12 +60,17 @@ import java.util.Map;
 @Stable
 public abstract class Resource implements Comparable {
 
+  private static Resource tmpResource = Records.newRecord(Resource.class);
+
   private static class SimpleResource extends Resource {
 private long memory;
 private long vcores;
+private Map resourceInformationMap;
+
 SimpleResource(long memory, long vcores) {
   this.memory = memory;
   this.vcores = vcores;
+
 }
 @Override
 public int getMemory() {
@@ -89,17 +96,44 @@ public abstract class Resource implements 
Comparable {
 public void setVirtualCores(int vcores) {
   this.vcores = vcores;
 }
+@Override
+public Map getResources() {
+  if (resourceInformationMap == null) {
+resourceInformationMap = new HashMap<>();
+resourceInformationMap.put(ResourceInformation.MEMORY_MB.getName(),
+ResourceInformation.newInstance(ResourceInformation.MEMORY_MB));
+resourceInformationMap.put(ResourceInformation.VCORES.getName(),
+ResourceInformation.newInstance(ResourceInformation.VCORES));
+  }
+  resourceInformationMap.get(ResourceInformation.MEMORY_MB.getName())
+  .setValue(this.memory);
+  resourceInformationMap.get(ResourceInformation.VCORES.getName())
+  .setValue(this.vcores);
+  return Collections.unmodifiableMap(resourceInformationMap);
+}
   }
 
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {
+if (tmpResource.getResources().size() > 2) {
+  Resource ret = Records.newRecord(Resource.class);
+  ret.setMemorySize(memory);
+  ret.setVirtualCores(vCores);
+  return ret;
+}
 return new SimpleResource(memory, vCores);
   }
 
   @Public
   @Stable
   public static Resource newInstance(long memory, int vCores) {
+if (tmpResource.getResources().size() > 2) {
+  Resource ret = Records.newRecord(Resource.class);
+  ret.setMemorySize(memory);
+  ret.setVirtualCores(vCores);
+  return ret;
+}
 return new SimpleResource(memory, vCores);
   }
 
@@ -116,13 +150,7 @@ public abstract class Resource implements 
Comparable {
   public static void copy(Resource source, Resource dest) {
 for (Map.Entry entry : source.getResources()
 .entrySet()) {
-  try {
-ResourceInformation.copy(entry.getValue(),
-dest.getResourceInformation(entry.getKey()));
-  } catch (YarnException ye) {
-dest.setResourceInformation(entry.getKey(),
-ResourceInformation.newInstance(entry.getValue()));
-  }
+  dest.setResourceInformation(entry.getKey(), entry.getValue());
 }
   }
 
@@ -234,8 +262,15 @@ public abstract class Resource implements 
Comparable {
*/
   @Public
   @Evolving
-  public abstract ResourceInformation getResourceInformation(String resource)
-  throws YarnException;
+  public ResourceInformation getResourceInformation(String 

[34/45] hadoop git commit: YARN-6892. [YARN-3926] Improve API implementation in Resources and DominantResourceCalculator class. Contributed by Sunil G.

2017-08-21 Thread wangda
YARN-6892. [YARN-3926] Improve API implementation in Resources and 
DominantResourceCalculator class. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5e93e18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5e93e18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5e93e18

Branch: refs/heads/YARN-3926
Commit: d5e93e18fd0bf579f37b0fc541fec1bdd1eb188d
Parents: b5bda80
Author: Sunil G 
Authored: Wed Aug 16 15:25:36 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |  70 +++-
 .../resource/DominantResourceCalculator.java| 317 ---
 .../hadoop/yarn/util/resource/Resources.java|  98 +++---
 3 files changed, 254 insertions(+), 231 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5e93e18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 332296e..1e9f213 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -164,7 +164,6 @@ public abstract class Resource implements 
Comparable {
 "This method is implemented by ResourcePBImpl");
   }
 
-
   /**
* Get number of virtual cpu cores of the resource.
* 
@@ -179,7 +178,7 @@ public abstract class Resource implements 
Comparable {
   @Public
   @Evolving
   public abstract int getVirtualCores();
-  
+
   /**
* Set number of virtual cpu cores of the resource.
* 
@@ -225,6 +224,27 @@ public abstract class Resource implements 
Comparable {
   }
 
   /**
+   * Get ResourceInformation for a specified resource from a given index.
+   *
+   * @param index
+   *  of the resource
+   * @return the ResourceInformation object for the resource
+   * @throws ResourceNotFoundException
+   *   if the resource can't be found
+   */
+  @Public
+  @Evolving
+  public ResourceInformation getResourceInformation(int index)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+  throw new ResourceNotFoundException("Unknown resource at index '" + index
+  + "'. Valid resources are: " + Arrays.toString(resources));
+}
+return resources[index];
+  }
+
+  /**
* Get the value for a specified resource. No information about the units is
* returned.
*
@@ -264,6 +284,29 @@ public abstract class Resource implements 
Comparable {
   }
 
   /**
+   * Set the ResourceInformation object for a particular resource.
+   *
+   * @param index
+   *  the resource index for which the ResourceInformation is provided
+   * @param resourceInformation
+   *  ResourceInformation object
+   * @throws ResourceNotFoundException
+   *   if the resource is not found
+   */
+  @Public
+  @Evolving
+  public void setResourceInformation(int index,
+  ResourceInformation resourceInformation)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+  throw new ResourceNotFoundException("Unknown resource at index '" + index
+  + "'. Valid resources are " + Arrays.toString(resources));
+}
+ResourceInformation.copy(resourceInformation, resources[index]);
+  }
+
+  /**
* Set the value of a resource in the ResourceInformation object. The unit of
* the value is assumed to be the one in the ResourceInformation object.
*
@@ -288,6 +331,29 @@ public abstract class Resource implements 
Comparable {
 storedResourceInfo.setValue(value);
   }
 
+  /**
+   * Set the value of a resource in the ResourceInformation object. The unit of
+   * the value is assumed to be the one in the ResourceInformation object.
+   *
+   * @param index
+   *  the resource index for which the value is provided.
+   * @param value
+   *  the value to set
+   * @throws ResourceNotFoundException
+   *   if the resource is not found
+   */
+  @Public
+  @Evolving
+  public void setResourceValue(int index, long value)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+

[24/45] hadoop git commit: YARN-5588. [YARN-3926] Add support for resource profiles in distributed shell. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-5588. [YARN-3926] Add support for resource profiles in distributed shell. 
Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6800253
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6800253
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6800253

Branch: refs/heads/YARN-3926
Commit: b6800253120388333028674f6252f660f8e4fc82
Parents: c9e1ed8
Author: Sunil G 
Authored: Mon Feb 27 21:44:14 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:55 2017 -0700

--
 .../yarn/api/records/ProfileCapability.java |  16 +-
 .../ResourceProfilesNotEnabledException.java|  43 +
 .../distributedshell/ApplicationMaster.java |  61 +--
 .../applications/distributedshell/Client.java   | 174 +++
 .../distributedshell/TestDistributedShell.java  |  29 
 .../yarn/client/api/impl/TestAMRMClient.java|   2 +-
 .../server/resourcemanager/ClientRMService.java |   4 +-
 .../resource/ResourceProfilesManagerImpl.java   |   6 +-
 .../scheduler/ClusterNodeTracker.java   |  12 +-
 9 files changed, 288 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6800253/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
index faaddd5..1a8d1c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
@@ -150,17 +150,21 @@ public abstract class ProfileCapability {
 .checkArgument(capability != null, "Capability cannot be null");
 Preconditions.checkArgument(resourceProfilesMap != null,
 "Resource profiles map cannot be null");
+Resource none = Resource.newInstance(0, 0);
 Resource resource = Resource.newInstance(0, 0);
-
-if (resourceProfilesMap.containsKey(capability.getProfileName())) {
-  resource = Resource
-  .newInstance(resourceProfilesMap.get(capability.getProfileName()));
+String profileName = capability.getProfileName();
+if (profileName.isEmpty()) {
+  profileName = DEFAULT_PROFILE;
+}
+if (resourceProfilesMap.containsKey(profileName)) {
+  resource = Resource.newInstance(resourceProfilesMap.get(profileName));
 }
 
-if(capability.getProfileCapabilityOverride()!= null) {
+if (capability.getProfileCapabilityOverride() != null &&
+!capability.getProfileCapabilityOverride().equals(none)) {
   for (Map.Entry entry : capability
   .getProfileCapabilityOverride().getResources().entrySet()) {
-if (entry.getValue() != null && entry.getValue().getValue() != 0) {
+if (entry.getValue() != null && entry.getValue().getValue() >= 0) {
   resource.setResourceInformation(entry.getKey(), entry.getValue());
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6800253/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
new file mode 100644
index 000..558e075
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT 

[37/45] hadoop git commit: YARN-6994. [YARN-3926] Remove last uses of Long from resource types code. (Daniel Templeton via Yufei Gu)

2017-08-21 Thread wangda
YARN-6994. [YARN-3926] Remove last uses of Long from resource types code. 
(Daniel Templeton via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5bda802
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5bda802
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5bda802

Branch: refs/heads/YARN-3926
Commit: b5bda8022d5d137aa1078569ff0cae062f43e6c5
Parents: 984e990
Author: Yufei Gu 
Authored: Mon Aug 14 11:18:08 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../main/java/org/apache/hadoop/yarn/api/records/Resource.java   | 4 ++--
 .../apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java   | 4 ++--
 .../java/org/apache/hadoop/yarn/util/resource/Resources.java | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5bda802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index a485a57..332296e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -273,14 +273,14 @@ public abstract class Resource implements 
Comparable {
*/
   @Public
   @Evolving
-  public void setResourceValue(String resource, Long value)
+  public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 if (resource.equals(MEMORY)) {
   this.setMemorySize(value);
   return;
 }
 if (resource.equals(VCORES)) {
-  this.setVirtualCores(value.intValue());
+  this.setVirtualCores((int)value);
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5bda802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 561deb3..cbb040a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 import java.util.Arrays;
 import java.util.Map;
@@ -174,7 +174,7 @@ public class ResourcePBImpl extends BaseResource {
   }
 
   @Override
-  public void setResourceValue(String resource, Long value)
+  public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 maybeInitBuilder();
 if (resource == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5bda802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index f62114d..3cf78ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -104,7 +104,7 @@ public class Resources {
 }
 
 @Override
-public void setResourceValue(String resource, Long value)
+public void setResourceValue(String resource, long value)
 throws ResourceNotFoundException {
   throw 

[33/45] hadoop git commit: YARN-6781. [YARN-3926] ResourceUtils#initializeResourcesMap takes an unnecessary Map parameter. Contributed by Yu-Tang Lin.

2017-08-21 Thread wangda
YARN-6781. [YARN-3926] ResourceUtils#initializeResourcesMap takes an 
unnecessary Map parameter. Contributed by Yu-Tang Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f461a2d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f461a2d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f461a2d4

Branch: refs/heads/YARN-3926
Commit: f461a2d420b1af204b79b52c555d51e447452efc
Parents: 176e5c8
Author: Sunil G 
Authored: Fri Aug 18 19:00:49 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../hadoop/yarn/util/resource/ResourceUtils.java  | 14 +++---
 .../hadoop/yarn/util/resource/TestResourceUtils.java  |  8 
 2 files changed, 11 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f461a2d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 5ed5712..997c2c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -201,9 +201,9 @@ public class ResourceUtils {
   }
 
   @VisibleForTesting
-  static void initializeResourcesMap(Configuration conf,
-  Map resourceInformationMap) {
+  static void initializeResourcesMap(Configuration conf) {
 
+Map resourceInformationMap = new HashMap<>();
 String[] resourceNames = conf.getStrings(YarnConfiguration.RESOURCE_TYPES);
 
 if (resourceNames != null && resourceNames.length != 0) {
@@ -339,19 +339,18 @@ public class ResourceUtils {
 if (!initializedResources) {
   synchronized (ResourceUtils.class) {
 if (!initializedResources) {
-  Map resources = new HashMap<>();
   if (conf == null) {
 conf = new YarnConfiguration();
   }
   try {
 addResourcesFileToConf(resourceFile, conf);
 LOG.debug("Found " + resourceFile + ", adding to configuration");
-initializeResourcesMap(conf, resources);
+initializeResourcesMap(conf);
 initializedResources = true;
   } catch (FileNotFoundException fe) {
 LOG.info("Unable to find '" + resourceFile
 + "'. Falling back to memory and vcores as resources", fe);
-initializeResourcesMap(conf, resources);
+initializeResourcesMap(conf);
 initializedResources = true;
   }
 }
@@ -414,11 +413,12 @@ public class ResourceUtils {
   }
 
   @VisibleForTesting
-  public static void resetResourceTypes(Configuration conf) {
+  public static Map
+  resetResourceTypes(Configuration conf) {
 synchronized (ResourceUtils.class) {
   initializedResources = false;
 }
-getResourceTypes(conf);
+return getResourceTypes(conf);
   }
 
   public static String getUnits(String resourceValue) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f461a2d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index b530150..4e4671a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -183,8 +183,9 @@ public class TestResourceUtils {
 YarnConfiguration.RESOURCE_TYPES + "." + resources[0] + ".units";
 conf.set(name, resources[1]);
   }
-  Map ret = new HashMap<>();
-  ResourceUtils.initializeResourcesMap(conf, ret);
+  Map ret =
+  ResourceUtils.resetResourceTypes(conf);
+
   // for test1, 4 - length will be 1, 4
   // for the others, len will be 3
   int len 

[26/45] hadoop git commit: YARN-5587. Add support for resource profiles. (vvasudev via asuresh)

2017-08-21 Thread wangda
YARN-5587. Add support for resource profiles. (vvasudev via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9e1ed84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9e1ed84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9e1ed84

Branch: refs/heads/YARN-3926
Commit: c9e1ed84bce3c78c359a1876280c0eac23dcc049
Parents: f01f86c
Author: Arun Suresh 
Authored: Tue Nov 15 01:01:07 2016 -0800
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:55 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|   4 +
 .../RegisterApplicationMasterResponse.java  |   8 +
 .../yarn/api/records/ProfileCapability.java |  94 ++-
 .../hadoop/yarn/api/records/Resource.java   |  14 ++
 .../yarn/api/records/ResourceInformation.java   |  57 ++-
 .../yarn/api/records/ResourceRequest.java   |  43 -
 .../hadoop-yarn/hadoop-yarn-client/pom.xml  |   1 +
 .../hadoop/yarn/client/api/AMRMClient.java  | 117 +-
 .../yarn/client/api/impl/AMRMClientImpl.java| 152 ++---
 .../client/api/impl/RemoteRequestsTable.java| 109 +
 .../yarn/client/api/impl/TestAMRMClient.java| 141 ++--
 .../impl/TestAMRMClientContainerRequest.java|   8 +-
 .../api/impl/TestDistributedScheduling.java |  12 +-
 .../yarn/client/api/impl/TestNMClient.java  |   5 +-
 .../TestOpportunisticContainerAllocation.java   |  31 ++--
 .../src/test/resources/resource-profiles.json   |  18 +++
 ...RegisterApplicationMasterResponsePBImpl.java |  58 +++
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../records/impl/pb/ResourceRequestPBImpl.java  |  41 -
 .../yarn/util/resource/ResourceUtils.java   | 161 ++-
 .../hadoop/yarn/util/resource/Resources.java|  10 +-
 .../ApplicationMasterService.java   |   1 +
 .../resourcemanager/DefaultAMSProcessor.java|   8 +
 .../server/resourcemanager/RMServerUtils.java   |  50 ++
 .../resource/ResourceProfilesManagerImpl.java   |   4 +
 .../scheduler/AbstractYarnScheduler.java|  44 +
 .../scheduler/ClusterNodeTracker.java   |   3 +-
 .../scheduler/SchedulerUtils.java   |  10 ++
 .../scheduler/capacity/CapacityScheduler.java   |   4 +-
 .../scheduler/fair/FairScheduler.java   |   4 +-
 .../scheduler/fifo/FifoScheduler.java   |  13 +-
 .../yarn/server/resourcemanager/MockRM.java |   2 +
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../TestApplicationMasterService.java   |  35 
 .../scheduler/fair/TestFairScheduler.java   |   4 +
 .../hadoop/yarn/server/MiniYARNCluster.java |   2 +
 36 files changed, 1100 insertions(+), 173 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e1ed84/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36..ce7a9c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -154,6 +154,10 @@
 
   
   
+
+
+  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e1ed84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
index 0b886dd..8fa8563 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
@@ -204,4 +204,12 @@ public abstract class RegisterApplicationMasterResponse {
   @Unstable
   public abstract void setSchedulerResourceTypes(
   EnumSet types);
+
+  @Public
+  @Unstable
+  public abstract Map getResourceProfiles();
+
+  @Private
+  @Unstable
+  public abstract void setResourceProfiles(Map profiles);
 }


[27/45] hadoop git commit: YARN-5586. Update the Resources class to consider all resource types. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-5586. Update the Resources class to consider all resource types. 
Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60b928e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60b928e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60b928e1

Branch: refs/heads/YARN-3926
Commit: 60b928e12adbcc9fe282ce3fcf328f541fab2534
Parents: 4b5d483
Author: Rohith Sharma K S 
Authored: Mon Sep 12 10:44:26 2016 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:55 2017 -0700

--
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../resource/DominantResourceCalculator.java|  36 ++--
 .../yarn/util/resource/ResourceUtils.java   |   3 +-
 .../hadoop/yarn/util/resource/Resources.java| 138 +++--
 .../yarn/util/resource/TestResourceUtils.java   |  23 +++
 .../yarn/util/resource/TestResources.java   | 207 +--
 .../resourcemanager/resource/TestResources.java |  43 
 7 files changed, 366 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60b928e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index b51121b..63b466b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -170,7 +170,9 @@ public class ResourcePBImpl extends Resource {
   resourceInformation.setName(resource);
 }
 initResources();
-resources.put(resource, resourceInformation);
+if (resources.containsKey(resource)) {
+  resources.put(resource, resourceInformation);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60b928e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 3c4413c..7db1da4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -183,8 +183,10 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 Long requiredResourceValue = UnitsConversionUtil
 .convert(requiredResource.getUnits(), availableResource.getUnits(),
 requiredResource.getValue());
-Long tmp = availableResource.getValue() / requiredResourceValue;
-min = min < tmp ? min : tmp;
+if (requiredResourceValue != 0) {
+  Long tmp = availableResource.getValue() / requiredResourceValue;
+  min = min < tmp ? min : tmp;
+}
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
 "Error getting resource information for " + resource, ye);
@@ -301,10 +303,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 rResourceInformation.getUnits(),
 stepFactorResourceInformation.getValue());
-
-tmp.setValue(
-Math.min(roundUp(Math.max(rValue, minimumValue), stepFactorValue),
-maximumValue));
+Long value = Math.max(rValue, minimumValue);
+if (stepFactorValue != 0) {
+  value = roundUp(value, stepFactorValue);
+}
+tmp.setValue(Math.min(value, maximumValue));
 ret.setResourceInformation(resource, tmp);
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
@@ -340,9 +343,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 rResourceInformation.getUnits(),
 

[05/45] hadoop git commit: HADOOP-14769. WASB: delete recursive should not fail if a file is deleted. Contributed by Thomas Marquardt

2017-08-21 Thread wangda
HADOOP-14769. WASB: delete recursive should not fail if a file is deleted.
Contributed by Thomas Marquardt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6b4e656
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6b4e656
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6b4e656

Branch: refs/heads/YARN-3926
Commit: c6b4e656b76b68cc1d0dbcc15a5aa5ea23335b7b
Parents: 99e558b
Author: Steve Loughran 
Authored: Fri Aug 18 14:13:40 2017 +0100
Committer: Steve Loughran 
Committed: Fri Aug 18 14:13:40 2017 +0100

--
 .../fs/azure/AzureNativeFileSystemStore.java| 21 ---
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 47 ---
 .../TestFileSystemOperationsWithThreads.java| 61 
 3 files changed, 86 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6b4e656/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 554027b..b0cd701 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2459,8 +2459,11 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 try {
   blob.delete(operationContext, lease);
 } catch (StorageException e) {
-  LOG.error("Encountered Storage Exception for delete on Blob: {}, 
Exception Details: {} Error Code: {}",
-  blob.getUri(), e.getMessage(), e.getErrorCode());
+  if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
+LOG.error("Encountered Storage Exception for delete on Blob: {}"
++ ", Exception Details: {} Error Code: {}",
+blob.getUri(), e.getMessage(), e.getErrorCode());
+  }
   // On exception, check that if:
   // 1. It's a BlobNotFound exception AND
   // 2. It got there after one-or-more retries THEN
@@ -2491,17 +2494,17 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 // Container doesn't exist, no need to do anything
 return true;
   }
-
   // Get the blob reference and delete it.
   CloudBlobWrapper blob = getBlobReference(key);
-  if (blob.exists(getInstrumentedContext())) {
-safeDelete(blob, lease);
-return true;
-  } else {
+  safeDelete(blob, lease);
+  return true;
+} catch (Exception e) {
+  if (e instanceof StorageException
+  && NativeAzureFileSystemHelper.isFileNotFoundException(
+  (StorageException) e)) {
+// the file or directory does not exist
 return false;
   }
-} catch (Exception e) {
-  // Re-throw as an Azure storage exception.
   throw new AzureException(e);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6b4e656/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index a7558a3..2abc6c6 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -2043,7 +2043,12 @@ public class NativeAzureFileSystem extends FileSystem {
   AzureFileSystemThreadTask task = new AzureFileSystemThreadTask() {
 @Override
 public boolean execute(FileMetadata file) throws IOException{
-  return deleteFile(file.getKey(), file.isDir());
+  if (!deleteFile(file.getKey(), file.isDir())) {
+LOG.warn("Attempt to delete non-existent {} {}",
+file.isDir() ? "directory" : "file",
+file.getKey());
+  }
+  return true;
 }
   };
 
@@ -2080,30 +2085,28 @@ public class NativeAzureFileSystem extends FileSystem {
 return new AzureFileSystemThreadPoolExecutor(threadCount, 
threadNamePrefix, operation, key, config);
   }
 
-  // Delete single file / directory from key.
+  /**
+   * Delete the specified file or directory and increment metrics.
+   * If the file or directory does not exist, the operation returns false.
+   * 

[41/45] hadoop git commit: YARN-7030. [YARN-3926] Performance optimizations in Resource and ResourceUtils class. Contributed by Wangda Tan.

2017-08-21 Thread wangda
YARN-7030. [YARN-3926] Performance optimizations in Resource and ResourceUtils 
class. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d6ca4b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d6ca4b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d6ca4b7

Branch: refs/heads/YARN-3926
Commit: 9d6ca4b7588cf8955cbc5f9d2cc43090d1fccb6b
Parents: a77d6d4
Author: Sunil G 
Authored: Thu Aug 17 21:14:51 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |  6 ++---
 .../yarn/api/records/impl/BaseResource.java | 10 +---
 .../yarn/util/resource/ResourceUtils.java   | 25 
 .../hadoop/yarn/util/resource/Resources.java| 16 ++---
 4 files changed, 39 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6ca4b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 1e9f213..04579c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -65,7 +65,7 @@ public abstract class Resource implements 
Comparable {
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {
-if (ResourceUtils.getResourceTypesArray().length > 2) {
+if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
   Resource ret = Records.newRecord(Resource.class);
   ret.setMemorySize(memory);
   ret.setVirtualCores(vCores);
@@ -77,7 +77,7 @@ public abstract class Resource implements Comparable<Resource> {
   @Public
   @Stable
   public static Resource newInstance(long memory, int vCores) {
-if (ResourceUtils.getResourceTypesArray().length > 2) {
+if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
   Resource ret = Records.newRecord(Resource.class);
   ret.setMemorySize(memory);
   ret.setVirtualCores(vCores);
@@ -91,7 +91,7 @@ public abstract class Resource implements Comparable<Resource> {
   public static Resource newInstance(Resource resource) {
 Resource ret = Resource.newInstance(resource.getMemorySize(),
 resource.getVirtualCores());
-if (ResourceUtils.getResourceTypesArray().length > 2) {
+if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
   Resource.copy(resource, ret);
 }
 return ret;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6ca4b7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java
index 83db542..b5cc4d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java
@@ -63,6 +63,11 @@ public class BaseResource extends Resource {
   protected ResourceInformation[] resources = null;
   protected ResourceInformation[] readOnlyResources = null;
 
+  // Number of mandatory resources, this is added to avoid invoke
+  // MandatoryResources.values().length, since values() internally will
+  // copy array, etc.
+  private static final int NUM_MANDATORY_RESOURCES = 2;
+
   protected enum MandatoryResources {
 MEMORY(0), VCORES(1);
 
@@ -86,9 +91,8 @@ public class BaseResource extends Resource {
 ResourceInformation.MEMORY_MB.getUnits(), memory);
 this.vcoresResInfo = ResourceInformation.newInstance(VCORES, "", vcores);
 
-resources = new ResourceInformation[MandatoryResources.values().length];
-readOnlyResources = new ResourceInformation[MandatoryResources
-.values().length];
+resources = new ResourceInformation[NUM_MANDATORY_RESOURCES];
+readOnlyResources = new ResourceInformation[NUM_MANDATORY_RESOURCES];
 resources[MandatoryResources.MEMORY.id] = memoryResInfo;
 resources[MandatoryResources.VCORES.id] = vcoresResInfo;
  

[06/45] hadoop git commit: YARN-7007. NPE in RM while using YarnClient.getApplications(). Contributed by Lingfeng Su.

2017-08-21 Thread wangda
YARN-7007. NPE in RM while using YarnClient.getApplications(). Contributed by 
Lingfeng Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e05fa345
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e05fa345
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e05fa345

Branch: refs/heads/YARN-3926
Commit: e05fa3451db343c0d22496b332910874b6be5b7f
Parents: c6b4e65
Author: bibinchundatt 
Authored: Fri Aug 18 20:28:50 2017 +0530
Committer: bibinchundatt 
Committed: Fri Aug 18 20:28:50 2017 +0530

--
 .../rmapp/attempt/RMAppAttemptMetrics.java   | 19 +++
 1 file changed, 11 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e05fa345/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index e089050..0655609 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -31,6 +31,7 @@ import 
org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -125,14 +126,16 @@ public class RMAppAttemptMetrics {
 long vcoreSeconds = finishedVcoreSeconds.get();
 
 // Only add in the running containers if this is the active attempt.
-RMAppAttempt currentAttempt = rmContext.getRMApps()
-   .get(attemptId.getApplicationId()).getCurrentAppAttempt();
-if (currentAttempt.getAppAttemptId().equals(attemptId)) {
-  ApplicationResourceUsageReport appResUsageReport = rmContext
-.getScheduler().getAppResourceUsageReport(attemptId);
-  if (appResUsageReport != null) {
-memorySeconds += appResUsageReport.getMemorySeconds();
-vcoreSeconds += appResUsageReport.getVcoreSeconds();
+RMApp rmApp = rmContext.getRMApps().get(attemptId.getApplicationId());
+if (null != rmApp) {
+  RMAppAttempt currentAttempt = rmApp.getCurrentAppAttempt();
+  if (currentAttempt.getAppAttemptId().equals(attemptId)) {
+ApplicationResourceUsageReport appResUsageReport = rmContext
+.getScheduler().getAppResourceUsageReport(attemptId);
+if (appResUsageReport != null) {
+  memorySeconds += appResUsageReport.getMemorySeconds();
+  vcoreSeconds += appResUsageReport.getVcoreSeconds();
+}
   }
 }
 return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/45] hadoop git commit: HADOOP-14398. Modify documents for the FileSystem Builder API. (Lei (Eddy) Xu)

2017-08-21 Thread wangda
HADOOP-14398. Modify documents for the FileSystem Builder API. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e558b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e558b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e558b1

Branch: refs/heads/YARN-3926
Commit: 99e558b13ba4d5832aea97374e1d07b4e78e5e39
Parents: 4230872
Author: Lei Xu 
Authored: Thu Aug 17 18:06:23 2017 -0700
Committer: Lei Xu 
Committed: Thu Aug 17 18:06:23 2017 -0700

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java|  74 ++--
 .../src/site/markdown/filesystem/filesystem.md  |  33 +++-
 .../filesystem/fsdataoutputstreambuilder.md | 182 +++
 .../src/site/markdown/filesystem/index.md   |   1 +
 4 files changed, 272 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e558b1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 1f668eb..86c284a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -54,16 +54,29 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  * options accordingly, for example:
  *
  * 
- * FSDataOutputStreamBuilder builder = fs.createFile(path);
- * builder.permission(perm)
+ *
+ * // Don't
+ * if (fs instanceof FooFileSystem) {
+ *   FooFileSystem fs = (FooFileSystem) fs;
+ *   OutputStream out = dfs.createFile(path)
+ * .optionA()
+ * .optionB("value")
+ * .cache()
+ *   .build()
+ * } else if (fs instanceof BarFileSystem) {
+ *   ...
+ * }
+ *
+ * // Do
+ * OutputStream out = fs.createFile(path)
+ *   .permission(perm)
  *   .bufferSize(bufSize)
- *   .opt("dfs.outputstream.builder.lazy-persist", true)
- *   .opt("dfs.outputstream.builder.ec.policy-name", "rs-3-2-64k")
- *   .opt("fs.local.o-direct", true)
- *   .must("fs.s3a.fast-upload", true)
- *   .must("fs.azure.buffer-size", 256 * 1024 * 1024);
- * FSDataOutputStream out = builder.build();
- * ...
+ *   .opt("foofs:option.a", true)
+ *   .opt("foofs:option.b", "value")
+ *   .opt("barfs:cache", true)
+ *   .must("foofs:cache", true)
+ *   .must("barfs:cache-size", 256 * 1024 * 1024)
+ *   .build();
  * 
  *
  * If the option is not related to the file system, the option will be ignored.
@@ -263,6 +276,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional boolean parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, boolean value) {
 mandatoryKeys.remove(key);
@@ -272,6 +287,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional int parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, int value) {
 mandatoryKeys.remove(key);
@@ -281,6 +298,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional float parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, float value) {
 mandatoryKeys.remove(key);
@@ -290,6 +309,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional double parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, double value) {
 mandatoryKeys.remove(key);
@@ -299,6 +320,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set an array of string values as optional parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, @Nonnull final String... values) {
 mandatoryKeys.remove(key);
@@ -310,8 +333,7 @@ public abstract class FSDataOutputStreamBuilder
* Set mandatory option to the Builder.
*
* If the option is not supported or unavailable on the {@link FileSystem},
-   * the client should expect {@link #build()} throws
-   * {@link IllegalArgumentException}.
+   * the client should expect {@link #build()} throws IllegalArgumentException.
*/
   public B must(@Nonnull final String key, @Nonnull final String value) {
 mandatoryKeys.add(key);
@@ -319,35 +341,55 @@ public abstract class FSDataOutputStreamBuilder
 return getThisBuilder();
   }
 
-  /** Set mandatory boolean option. */
+  /**
+   * 

[43/45] hadoop git commit: YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang Cang.

2017-08-21 Thread wangda
YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang 
Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7a9c803
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7a9c803
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7a9c803

Branch: refs/heads/YARN-3926
Commit: f7a9c8039586ff8e40b093c48302e78cc2da05b2
Parents: e757abd
Author: Sunil G 
Authored: Thu Jul 13 16:30:59 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7a9c803/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index a9abed9..7bc7f5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.api.records.impl.pb;
 
-import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -34,7 +33,10 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 
-import java.util.*;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Collections;
+
 
 @Private
 @Unstable


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/45] hadoop git commit: Revert "HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure durations. Contributed by Hanisha Koneru."

2017-08-21 Thread wangda
Revert "HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure 
durations. Contributed by Hanisha Koneru."

This reverts commit 8bef4eca28a3466707cc4ea0de0330449319a5eb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d105a20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d105a20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d105a20

Branch: refs/heads/YARN-3926
Commit: 2d105a206884b62ccdba61f2de3e2fe65fc43074
Parents: e05fa34
Author: Arpit Agarwal 
Authored: Fri Aug 18 10:15:52 2017 -0700
Committer: Arpit Agarwal 
Committed: Fri Aug 18 10:15:52 2017 -0700

--
 .../java/org/apache/hadoop/ipc/ProtobufRpcEngine.java | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d105a20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 2c0cfe5..639bbad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -190,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = Time.monotonicNow();
+startTime = Time.now();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -245,7 +245,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = Time.monotonicNow() - startTime;
+long callTime = Time.now() - startTime;
 LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
   }
   
@@ -373,19 +373,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 this.server = currentCallInfo.get().server;
 this.call = Server.getCurCall().get();
 this.methodName = currentCallInfo.get().methodName;
-this.setupTime = Time.monotonicNow();
+this.setupTime = Time.now();
   }
 
   @Override
   public void setResponse(Message message) {
-long processingTime = Time.monotonicNow() - setupTime;
+long processingTime = Time.now() - setupTime;
 call.setDeferredResponse(RpcWritable.wrap(message));
 server.updateDeferredMetrics(methodName, processingTime);
   }
 
   @Override
   public void error(Throwable t) {
-long processingTime = Time.monotonicNow() - setupTime;
+long processingTime = Time.now() - setupTime;
 String detailedMetricsName = t.getClass().getSimpleName();
 server.updateDeferredMetrics(detailedMetricsName, processingTime);
 call.setDeferredError(t);
@@ -513,7 +513,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 Message param = request.getValue(prototype);
 
 Message result;
-long startTime = Time.monotonicNow();
+long startTime = Time.now();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
 boolean isDeferred = false;
@@ -537,7 +537,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   throw e;
 } finally {
   currentCallInfo.set(null);
-  int processingTime = (int) (Time.monotonicNow() - startTime);
+  int processingTime = (int) (Time.now() - startTime);
   if (LOG.isDebugEnabled()) {
 String msg =
 "Served: " + methodName + (isDeferred ? ", deferred" : "") +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/45] hadoop git commit: YARN-4829. Add support for binary units in Resource class.(vvasudev via asuresh)

2017-08-21 Thread wangda
YARN-4829. Add support for binary units in Resource class.(vvasudev via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52ba61a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52ba61a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52ba61a9

Branch: refs/heads/YARN-3926
Commit: 52ba61a92239a3f68b1455d7a71e4e3714de5b05
Parents: cf68c1a
Author: Arun Suresh 
Authored: Thu Mar 17 23:50:22 2016 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:54 2017 -0700

--
 .../yarn/api/records/ResourceInformation.java   |  2 +-
 .../hadoop/yarn/util/UnitsConversionUtil.java   | 45 ++--
 .../yarn/util/TestUnitsConversionUtil.java  | 17 +++-
 .../api/records/impl/pb/ResourcePBImpl.java |  2 +-
 .../yarn/util/resource/TestResourceUtils.java   |  2 +-
 5 files changed, 52 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52ba61a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 80e3192..a17e81b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -36,7 +36,7 @@ public class ResourceInformation implements Comparable<ResourceInformation> {
   private static final String VCORES_URI = "vcores";
 
   public static final ResourceInformation MEMORY_MB =
-  ResourceInformation.newInstance(MEMORY_URI, "M");
+  ResourceInformation.newInstance(MEMORY_URI, "Mi");
   public static final ResourceInformation VCORES =
   ResourceInformation.newInstance(VCORES_URI);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52ba61a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
index 7785263..47bb3df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
@@ -46,7 +46,8 @@ public class UnitsConversionUtil {
   }
 
   private static final String[] UNITS =
-  {"p", "n", "u", "m", "", "k", "M", "G", "T", "P"};
+  { "p", "n", "u", "m", "", "k", "M", "G", "T", "P", "Ki", "Mi", "Gi", 
"Ti",
+  "Pi" };
  private static final List<String> SORTED_UNITS = Arrays.asList(UNITS);
  public static final Set<String> KNOWN_UNITS = createKnownUnitsSet();
   private static final Converter PICO =
@@ -65,6 +66,15 @@ public class UnitsConversionUtil {
   private static final Converter PETA =
   new Converter(1000L * 1000L * 1000L * 1000L * 1000L, 1L);
 
+  private static final Converter KILO_BINARY = new Converter(1024L, 1L);
+  private static final Converter MEGA_BINARY = new Converter(1024L * 1024L, 
1L);
+  private static final Converter GIGA_BINARY =
+  new Converter(1024L * 1024L * 1024L, 1L);
+  private static final Converter TERA_BINARY =
+  new Converter(1024L * 1024L * 1024L * 1024L, 1L);
+  private static final Converter PETA_BINARY =
+  new Converter(1024L * 1024L * 1024L * 1024L * 1024L, 1L);
+
  private static Set<String> createKnownUnitsSet() {
 Set ret = new HashSet<>();
 ret.addAll(Arrays.asList(UNITS));
@@ -93,6 +103,16 @@ public class UnitsConversionUtil {
   return TERA;
 case "P":
   return PETA;
+case "Ki":
+  return KILO_BINARY;
+case "Mi":
+  return MEGA_BINARY;
+case "Gi":
+  return GIGA_BINARY;
+case "Ti":
+  return TERA_BINARY;
+case "Pi":
+  return PETA_BINARY;
 default:
   throw new IllegalArgumentException(
   "Unknown unit '" + unit + "'. Known units are " + KNOWN_UNITS);
@@ -112,28 +132,29 @@ public class UnitsConversionUtil {
 if (toUnit == null || fromUnit == null || fromValue == null) {
   throw new IllegalArgumentException("One or more arguments are null");
 }
-Long tmp;
 String overflowMsg =
 

[10/45] hadoop git commit: YARN-6979. Add flag to notify all types of container updates to NM via NodeHeartbeatResponse. (Kartheek Muthyala via asuresh)

2017-08-21 Thread wangda
YARN-6979. Add flag to notify all types of container updates to NM via 
NodeHeartbeatResponse. (Kartheek Muthyala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8410d862
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8410d862
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8410d862

Branch: refs/heads/YARN-3926
Commit: 8410d862d3a72740f461ef91dddb5325955e1ca5
Parents: 436c263
Author: Arun Suresh 
Authored: Sun Aug 20 07:54:09 2017 -0700
Committer: Arun Suresh 
Committed: Sun Aug 20 07:54:09 2017 -0700

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   2 +-
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../src/main/resources/yarn-default.xml |   8 +
 .../protocolrecords/NodeHeartbeatResponse.java  |   6 +-
 .../impl/pb/NodeHeartbeatResponsePBImpl.java|  42 ++---
 .../yarn_server_common_service_protos.proto |   3 +
 .../hadoop/yarn/TestYarnServerApiClasses.java   |   6 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |   2 +-
 .../containermanager/ContainerManagerImpl.java  |  35 ++--
 .../scheduler/ContainerScheduler.java   |   1 +
 .../resourcemanager/ResourceTrackerService.java |   2 +-
 .../rmcontainer/RMContainerImpl.java|   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   6 +-
 .../rmnode/RMNodeDecreaseContainerEvent.java|  39 -
 .../resourcemanager/rmnode/RMNodeEventType.java |   2 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  |  29 ++--
 .../rmnode/RMNodeUpdateContainerEvent.java  |  44 +
 .../scheduler/AbstractYarnScheduler.java|  11 ++
 .../scheduler/SchedulerApplicationAttempt.java  |  39 +++--
 .../yarn/server/resourcemanager/MockNodes.java  |   2 +-
 ...pportunisticContainerAllocatorAMService.java | 168 +++
 .../capacity/TestContainerResizing.java |   7 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   4 +-
 24 files changed, 346 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8410d862/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 8962aba..e71ddff 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -179,7 +179,7 @@ public class NodeInfo {
 }
 
 @Override
-public void updateNodeHeartbeatResponseForContainersDecreasing(
+public void updateNodeHeartbeatResponseForUpdatedContainers(
 NodeHeartbeatResponse response) {
   // TODO Auto-generated method stub
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8410d862/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index d7b159c..6b7ac3c 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -168,7 +168,7 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
-  public void updateNodeHeartbeatResponseForContainersDecreasing(
+  public void updateNodeHeartbeatResponseForUpdatedContainers(
   NodeHeartbeatResponse response) {
 // TODO Auto-generated method stub
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8410d862/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8515e0a..86f45b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -167,6 +167,10 @@ public class YarnConfiguration 

[11/45] hadoop git commit: YARN-6979. [Addendum patch] Fixed classname and added javadocs. (Kartheek Muthyala via asuresh)

2017-08-21 Thread wangda
YARN-6979. [Addendum patch] Fixed classname and added javadocs. (Kartheek 
Muthyala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a82d7bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a82d7bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a82d7bc

Branch: refs/heads/YARN-3926
Commit: 7a82d7bcea8124e1b65c275fac15bf2047d17471
Parents: 8410d86
Author: Arun Suresh 
Authored: Sun Aug 20 08:55:13 2017 -0700
Committer: Arun Suresh 
Committed: Sun Aug 20 10:24:05 2017 -0700

--
 .../CMgrDecreaseContainersResourceEvent.java| 37 ---
 .../nodemanager/CMgrUpdateContainersEvent.java  | 48 
 .../nodemanager/ContainerManagerEventType.java  |  2 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |  8 ++--
 .../containermanager/ContainerManagerImpl.java  | 10 ++--
 5 files changed, 57 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a82d7bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
deleted file mode 100644
index 9479d0b..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.nodemanager;
-
-import org.apache.hadoop.yarn.api.records.Container;
-import java.util.List;
-
-public class CMgrDecreaseContainersResourceEvent extends ContainerManagerEvent 
{
-
-  private final List<Container> containersToDecrease;
-
-  public CMgrDecreaseContainersResourceEvent(List<Container> containersToDecrease) {
-super(ContainerManagerEventType.DECREASE_CONTAINERS_RESOURCE);
-this.containersToDecrease = containersToDecrease;
-  }
-
-  public List<Container> getContainersToDecrease() {
-return this.containersToDecrease;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a82d7bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
new file mode 100644
index 000..5e41701
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is 

[14/45] hadoop git commit: HDFS-12325. SFTPFileSystem operations should restore cwd. Contributed by Chen Liang.

2017-08-21 Thread wangda
HDFS-12325. SFTPFileSystem operations should restore cwd. Contributed by Chen 
Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736ceab2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736ceab2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736ceab2

Branch: refs/heads/YARN-3926
Commit: 736ceab2f58fb9ab5907c5b5110bd44384038e6b
Parents: 913760c
Author: Arpit Agarwal 
Authored: Sun Aug 20 23:41:06 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 21 11:48:51 2017 -0700

--
 .../main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java| 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/736ceab2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index 421769d..43eb783 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -326,8 +326,10 @@ public class SFTPFileSystem extends FileSystem {
 String parentDir = parent.toUri().getPath();
 boolean succeeded = true;
 try {
+  final String previousCwd = client.pwd();
   client.cd(parentDir);
   client.mkdir(pathName);
+  client.cd(previousCwd);
 } catch (SftpException e) {
   throw new IOException(String.format(E_MAKE_DIR_FORPATH, pathName,
   parentDir));
@@ -474,8 +476,10 @@ public class SFTPFileSystem extends FileSystem {
 }
 boolean renamed = true;
 try {
+  final String previousCwd = channel.pwd();
   channel.cd("/");
   channel.rename(src.toUri().getPath(), dst.toUri().getPath());
+  channel.cd(previousCwd);
 } catch (SftpException e) {
   renamed = false;
 }
@@ -558,8 +562,10 @@ public class SFTPFileSystem extends FileSystem {
 }
 OutputStream os;
 try {
+  final String previousCwd = client.pwd();
   client.cd(parent.toUri().getPath());
   os = client.put(f.getName());
+  client.cd(previousCwd);
 } catch (SftpException e) {
   throw new IOException(e);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/45] hadoop git commit: HDFS-12316. Verify HDFS snapshot deletion doesn't crash the ongoing file writes.

2017-08-21 Thread wangda
HDFS-12316. Verify HDFS snapshot deletion doesn't crash the ongoing file writes.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4230872d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4230872d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4230872d

Branch: refs/heads/YARN-3926
Commit: 4230872dd66d748172903b1522885b03f34bbf9b
Parents: b298948
Author: Manoj Govindassamy 
Authored: Thu Aug 17 16:23:48 2017 -0700
Committer: Manoj Govindassamy 
Committed: Thu Aug 17 16:23:48 2017 -0700

--
 .../snapshot/TestOpenFilesWithSnapshot.java | 109 +++
 1 file changed, 109 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4230872d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index fb83a3e..bf27f2c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -23,7 +23,11 @@ import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -38,12 +42,15 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestOpenFilesWithSnapshot {
+  private static final Log LOG =
+  LogFactory.getLog(TestOpenFilesWithSnapshot.class.getName());
   private final Configuration conf = new Configuration();
   MiniDFSCluster cluster = null;
   DistributedFileSystem fs = null;
@@ -622,6 +629,108 @@ public class TestOpenFilesWithSnapshot {
 hbaseOutputStream.close();
   }
 
+  /**
+   * Test client writing to open files are not interrupted when snapshots
+   * that captured open files get deleted.
+   */
+  @Test (timeout = 240000)
+  public void testOpenFileWritingAcrossSnapDeletion() throws Exception {
+final Path snapRootDir = new Path("/level_0_A");
+final String flumeFileName = "flume.log";
+final String hbaseFileName = "hbase.log";
+final String snap1Name = "snap_1";
+final String snap2Name = "snap_2";
+final String snap3Name = "snap_3";
+
+// Create files and open streams
+final Path flumeFile = new Path(snapRootDir, flumeFileName);
+FSDataOutputStream flumeOut = fs.create(flumeFile, false,
+8000, (short)3, 1048576);
+flumeOut.close();
+final Path hbaseFile = new Path(snapRootDir, hbaseFileName);
+FSDataOutputStream hbaseOut = fs.create(hbaseFile, false,
+8000, (short)3, 1048576);
+hbaseOut.close();
+
+final AtomicBoolean writerError = new AtomicBoolean(false);
+final CountDownLatch startLatch = new CountDownLatch(1);
+final CountDownLatch deleteLatch = new CountDownLatch(1);
+Thread t = new Thread(new Runnable() {
+  @Override
+  public void run() {
+try {
+  FSDataOutputStream flumeOutputStream = fs.append(flumeFile, 8000);
+  FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile, 8000);
+  byte[] bytes = new byte[(int) (1024 * 0.2)];
+  Random r = new Random(Time.now());
+
+  for (int i = 0; i < 20; i++) {
+r.nextBytes(bytes);
+flumeOutputStream.write(bytes);
+if (hbaseOutputStream != null) {
+  hbaseOutputStream.write(bytes);
+}
+if (i == 5) {
+  startLatch.countDown();
+} else if (i == 10) {
+  deleteLatch.countDown();
+} else if (i == 15) {
+  hbaseOutputStream.hsync();
+  fs.delete(hbaseFile, true);
+  try {
+hbaseOutputStream.close();
+  } catch (Exception e) {
+  

[28/45] hadoop git commit: YARN-5707. Add manager class for resource profiles. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-5707. Add manager class for resource profiles. Contributed by Varun 
Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9291d6d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9291d6d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9291d6d6

Branch: refs/heads/YARN-3926
Commit: 9291d6d6f70726950528a146662b4c4ffe0ad069
Parents: 60b928e
Author: Varun Vasudev 
Authored: Sat Oct 8 19:43:33 2016 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:55 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  23 +++
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../src/main/resources/yarn-default.xml |  16 ++
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   5 +
 .../resource/ResourceProfilesManager.java   |  46 +
 .../resource/ResourceProfilesManagerImpl.java   | 176 +++
 .../resource/TestResourceProfiles.java  | 142 +++
 .../resources/profiles/illegal-profiles-1.json  |  10 ++
 .../resources/profiles/illegal-profiles-2.json  |  10 ++
 .../resources/profiles/illegal-profiles-3.json  |  10 ++
 .../resources/profiles/sample-profiles-1.json   |  14 ++
 .../resources/profiles/sample-profiles-2.json   |  26 +++
 12 files changed, 482 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9291d6d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 48e6a3a..90c6e00 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -879,6 +879,29 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser.";
 
   /**
+   * Enable/disable resource profiles.
+   */
+  @Public
+  @Unstable
+  public static final String RM_RESOURCE_PROFILES_ENABLED =
+  RM_PREFIX + "resource-profiles.enabled";
+  @Public
+  @Unstable
+  public static final boolean DEFAULT_RM_RESOURCE_PROFILES_ENABLED = false;
+
+  /**
+   * File containing resource profiles.
+   */
+  @Public
+  @Unstable
+  public static final String RM_RESOURCE_PROFILES_SOURCE_FILE =
+  RM_PREFIX + "resource-profiles.source-file";
+  @Public
+  @Unstable
+  public static final String DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE =
+  "resource-profiles.json";
+
+  /**
* Timeout in seconds for YARN node graceful decommission.
* This is the maximal time to wait for running containers and applications
* to complete before transition a DECOMMISSIONING node into DECOMMISSIONED.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9291d6d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index c40c2c5..3c2900c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -139,6 +139,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 // Used as Java command line properties, not XML
 configurationPrefixToSkipCompare.add("yarn.app.container");
 
+// Ignore default file name for resource profiles
+configurationPropsToSkipCompare
+.add(YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE);
+
 // Ignore NodeManager "work in progress" variables
 configurationPrefixToSkipCompare
 .add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9291d6d6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 

[16/45] hadoop git commit: YARN-5242. Update DominantResourceCalculator to consider all resource types in calculations. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-5242. Update DominantResourceCalculator to consider all resource types in 
calculations. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b5d483f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b5d483f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b5d483f

Branch: refs/heads/YARN-3926
Commit: 4b5d483f0dfacbea47b20d64c7ec4073957f07af
Parents: 6c94e75
Author: Rohith Sharma K S 
Authored: Tue Jul 26 14:13:03 2016 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:54 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |  7 ++
 .../api/records/impl/pb/ResourcePBImpl.java |  2 +-
 .../resource/DominantResourceCalculator.java| 23 
 .../yarn/util/resource/ResourceUtils.java   |  5 +++--
 .../hadoop/yarn/util/resource/Resources.java|  6 +
 5 files changed, 31 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b5d483f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index c9c6a7a..507247e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -327,6 +327,8 @@ public abstract class Resource implements 
Comparable<Resource> {
 otherResources = other.getResources();
 long diff = thisResources.size() - otherResources.size();
 if (diff == 0) {
+  // compare memory and vcores first(in that order) to preserve
+  // existing behaviour
   if (thisResources.keySet().equals(otherResources.keySet())) {
 diff = this.getMemorySize() - other.getMemorySize();
 if (diff == 0) {
@@ -335,6 +337,11 @@ public abstract class Resource implements 
Comparable<Resource> {
 if (diff == 0) {
  for (Map.Entry<String, ResourceInformation> entry : thisResources
   .entrySet()) {
+if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+|| entry.getKey()
+.equals(ResourceInformation.VCORES.getName())) {
+  continue;
+}
 diff =
 entry.getValue().compareTo(otherResources.get(entry.getKey()));
 if (diff != 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b5d483f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 86ae41f..b51121b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -242,7 +242,7 @@ public class ResourcePBImpl extends Resource {
 builder.addResourceValueMap(e);
   }
 }
-builder.setMemory(this.getMemory());
+builder.setMemory(this.getMemorySize());
 builder.setVirtualCores(this.getVirtualCores());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b5d483f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 0412c0f..3c4413c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -397,10 +397,25 @@ public class DominantResourceCalculator extends 

[20/45] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-08-21 Thread wangda
YARN-4081. Add support for multiple resource types in the Resource class. 
(Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64439e4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64439e4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64439e4c

Branch: refs/heads/YARN-3926
Commit: 64439e4ca696a96da74043a0c937ec4dcb7647d7
Parents: b6bfb2f
Author: Wangda Tan 
Authored: Thu Sep 10 09:43:26 2015 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:54 2017 -0700

--
 .../src/main/resources/META-INF/LICENSE.txt | 1661 ++
 .../src/main/resources/META-INF/NOTICE.txt  |  283 +++
 .../yarn/api/protocolrecords/ResourceTypes.java |   27 +
 .../hadoop/yarn/api/records/Resource.java   |  205 ++-
 .../yarn/api/records/ResourceInformation.java   |  218 +++
 .../exceptions/ResourceNotFoundException.java   |   45 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |  197 +++
 .../src/main/proto/yarn_protos.proto|   12 +
 .../yarn/conf/TestResourceInformation.java  |   70 +
 .../yarn/util/TestUnitsConversionUtil.java  |  120 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java|   13 +
 .../api/records/impl/pb/ResourcePBImpl.java |  193 +-
 .../hadoop/yarn/util/resource/Resources.java|  137 +-
 .../hadoop/yarn/api/TestPBImplRecords.java  |4 +
 14 files changed, 3104 insertions(+), 81 deletions(-)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/45] hadoop git commit: YARN-6789. Add Client API to get all supported resource types from RM. (Sunil G via wangda)

2017-08-21 Thread wangda
YARN-6789. Add Client API to get all supported resource types from RM. (Sunil G 
via wangda)

Change-Id: I366d8db6f6700acd087db5acb7a1be7e41b2b68d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/176e5c8f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/176e5c8f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/176e5c8f

Branch: refs/heads/YARN-3926
Commit: 176e5c8fbd1f6cd60c6d1619dc4cf54b74afaf39
Parents: b073c6f
Author: Wangda Tan 
Authored: Thu Aug 17 11:30:41 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   7 +
 .../hadoop/mapred/TestClientRedirect.java   |   9 +
 .../yarn/api/ApplicationClientProtocol.java |  17 +-
 .../GetAllResourceTypeInfoRequest.java  |  35 
 .../GetAllResourceTypeInfoResponse.java |  60 ++
 .../yarn/api/records/ResourceTypeInfo.java  | 196 +++
 .../yarn/util/resource/ResourceUtils.java   |  13 ++
 .../main/proto/applicationclient_protocol.proto |   1 +
 .../src/main/proto/yarn_protos.proto|   6 +
 .../src/main/proto/yarn_service_protos.proto|   7 +
 .../hadoop/yarn/client/api/YarnClient.java  |  19 ++
 .../yarn/client/api/impl/YarnClientImpl.java|  14 +-
 .../ApplicationClientProtocolPBClientImpl.java  |  18 ++
 .../ApplicationClientProtocolPBServiceImpl.java |  21 ++
 .../pb/GetAllResourceTypeInfoRequestPBImpl.java |  70 +++
 .../GetAllResourceTypeInfoResponsePBImpl.java   | 184 +
 .../api/records/impl/pb/ResourcePBImpl.java |  12 +-
 .../records/impl/pb/ResourceTypeInfoPBImpl.java | 154 +++
 .../hadoop/yarn/api/TestPBImplRecords.java  |  25 +++
 .../hadoop/yarn/api/TestResourcePBImpl.java |  61 ++
 .../yarn/server/MockResourceManagerFacade.java  |   8 +
 .../server/resourcemanager/ClientRMService.java |  15 +-
 .../resource/ResourceProfilesManager.java   |   8 +
 .../resource/ResourceProfilesManagerImpl.java   |  46 -
 .../resourcemanager/TestClientRMService.java|  46 +
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 .../DefaultClientRequestInterceptor.java|   8 +
 .../clientrm/FederationClientInterceptor.java   |   8 +
 .../router/clientrm/RouterClientRMService.java  |   9 +
 .../PassThroughClientRequestInterceptor.java|   8 +
 30 files changed, 1074 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/176e5c8f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index a365f80..94f741a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -530,4 +531,10 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 return client.getResourceProfile(profile);
   }
+
+  @Override
+  public List<ResourceTypeInfo> getResourceTypeInfo()
+  throws YarnException, IOException {
+return client.getResourceTypeInfo();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/176e5c8f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 

[08/45] hadoop git commit: YARN-6969. Clean up unused code in class FairSchedulerQueueInfo. (Larry Lo via Yufei Gu)

2017-08-21 Thread wangda
YARN-6969. Clean up unused code in class FairSchedulerQueueInfo. (Larry Lo via 
Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8991f0ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8991f0ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8991f0ba

Branch: refs/heads/YARN-3926
Commit: 8991f0baec62625c45144e2544066195800ab95b
Parents: 2d105a2
Author: Yufei Gu 
Authored: Fri Aug 18 14:38:44 2017 -0700
Committer: Yufei Gu 
Committed: Fri Aug 18 14:38:44 2017 -0700

--
 .../webapp/dao/FairSchedulerQueueInfo.java | 17 -
 1 file changed, 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8991f0ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index 79339c7..913513c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -48,8 +48,6 @@ public class FairSchedulerQueueInfo {
   @XmlTransient
   private float fractionMemFairShare;
   @XmlTransient
-  private float fractionMemMinShare;
-  @XmlTransient
   private float fractionMemMaxShare;
   
   private ResourceInfo minResources;
@@ -63,7 +61,6 @@ public class FairSchedulerQueueInfo {
   private ResourceInfo clusterResources;
   private ResourceInfo reservedResources;
 
-  private long pendingContainers;
   private long allocatedContainers;
   private long reservedContainers;
 
@@ -108,12 +105,10 @@ public class FairSchedulerQueueInfo {
 (float)steadyFairResources.getMemorySize() / 
clusterResources.getMemorySize();
 fractionMemFairShare = (float) fairResources.getMemorySize()
 / clusterResources.getMemorySize();
-fractionMemMinShare = (float)minResources.getMemorySize() / 
clusterResources.getMemorySize();
 fractionMemMaxShare = (float)maxResources.getMemorySize() / 
clusterResources.getMemorySize();
 
 maxApps = queue.getMaxRunningApps();
 
-pendingContainers = queue.getMetrics().getPendingContainers();
 allocatedContainers = queue.getMetrics().getAllocatedContainers();
 reservedContainers = queue.getMetrics().getReservedContainers();
 
@@ -126,10 +121,6 @@ public class FairSchedulerQueueInfo {
 childQueues = getChildQueues(queue, scheduler);
   }
 
-  public long getPendingContainers() {
-return pendingContainers;
-  }
-
   public long getAllocatedContainers() {
 return allocatedContainers;
   }
@@ -234,14 +225,6 @@ public class FairSchedulerQueueInfo {
   }
 
   /**
-   * Returns the queue's min share in as a fraction of the entire
-   * cluster capacity.
-   */
-  public float getMinShareMemoryFraction() {
-return fractionMemMinShare;
-  }
-  
-  /**
* Returns the memory used by this queue as a fraction of the entire 
* cluster capacity.
*/


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/45] hadoop git commit: HDFS-11988. Verify HDFS Snapshots with open files captured are consistent across truncates and appends to current version file.

2017-08-21 Thread wangda
HDFS-11988. Verify HDFS Snapshots with open files captured are consistent 
across truncates and appends to current version file.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/913760cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/913760cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/913760cb

Branch: refs/heads/YARN-3926
Commit: 913760cb4fe7123e55004800f75dc00540a79f69
Parents: 267e19a
Author: Manoj Govindassamy 
Authored: Mon Aug 21 11:08:38 2017 -0700
Committer: Manoj Govindassamy 
Committed: Mon Aug 21 11:08:38 2017 -0700

--
 .../snapshot/TestOpenFilesWithSnapshot.java | 112 +++
 1 file changed, 112 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/913760cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index bf27f2c..537612c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
@@ -731,6 +732,117 @@ public class TestOpenFilesWithSnapshot {
 cluster.waitActive();
   }
 
+  /**
+   * Verify snapshots with open files captured are safe even when the
+   * 'current' version of the file is truncated and appended later.
+   */
+  @Test (timeout = 120000)
+  public void testOpenFilesSnapChecksumWithTrunkAndAppend() throws Exception {
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
+true);
+// Construct the directory tree
+final Path dir = new Path("/A/B/C");
+fs.mkdirs(dir);
+
+// String constants
+final Path hbaseSnapRootDir = dir;
+final String hbaseFileName = "hbase.wal";
+final String hbaseSnap1Name = "hbase_snap_s1";
+final String hbaseSnap2Name = "hbase_snap_s2";
+final String hbaseSnap3Name = "hbase_snap_s3";
+final String hbaseSnap4Name = "hbase_snap_s4";
+
+// Create files and open a stream
+final Path hbaseFile = new Path(dir, hbaseFileName);
+createFile(hbaseFile);
+final FileChecksum hbaseWALFileCksum0 =
+fs.getFileChecksum(hbaseFile);
+FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile);
+
+// Create Snapshot S1
+final Path hbaseS1Dir = SnapshotTestHelper.createSnapshot(
+fs, hbaseSnapRootDir, hbaseSnap1Name);
+final Path hbaseS1Path = new Path(hbaseS1Dir, hbaseFileName);
+final FileChecksum hbaseFileCksumS1 = fs.getFileChecksum(hbaseS1Path);
+
+// Verify if Snap S1 checksum is same as the current version one
+Assert.assertEquals("Live and snap1 file checksum doesn't match!",
+hbaseWALFileCksum0, fs.getFileChecksum(hbaseS1Path));
+
+int newWriteLength = (int) (BLOCKSIZE * 1.5);
+byte[] buf = new byte[newWriteLength];
+Random random = new Random();
+random.nextBytes(buf);
+writeToStream(hbaseOutputStream, buf);
+
+// Create Snapshot S2
+final Path hbaseS2Dir = SnapshotTestHelper.createSnapshot(
+fs, hbaseSnapRootDir, hbaseSnap2Name);
+final Path hbaseS2Path = new Path(hbaseS2Dir, hbaseFileName);
+final FileChecksum hbaseFileCksumS2 = fs.getFileChecksum(hbaseS2Path);
+
+// Verify if the s1 checksum is still the same
+Assert.assertEquals("Snap file checksum has changed!",
+hbaseFileCksumS1, fs.getFileChecksum(hbaseS1Path));
+// Verify if the s2 checksum is different from the s1 checksum
+Assert.assertNotEquals("Snap1 and snap2 file checksum should differ!",
+hbaseFileCksumS1, hbaseFileCksumS2);
+
+newWriteLength = (int) (BLOCKSIZE * 2.5);
+buf = new byte[newWriteLength];
+random.nextBytes(buf);
+writeToStream(hbaseOutputStream, buf);
+
+// Create Snapshot S3
+final Path hbaseS3Dir = SnapshotTestHelper.createSnapshot(
+fs, hbaseSnapRootDir, hbaseSnap3Name);
+final Path hbaseS3Path = new Path(hbaseS3Dir, hbaseFileName);
+FileChecksum 

[23/45] hadoop git commit: YARN-4830. Add support for resource types in the nodemanager. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-4830. Add support for resource types in the nodemanager. Contributed by 
Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c94e758
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c94e758
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c94e758

Branch: refs/heads/YARN-3926
Commit: 6c94e758682bd8c0df891abdc29c81fc3d01fa9f
Parents: 52ba61a
Author: Varun Vasudev 
Authored: Sat Jun 11 14:33:46 2016 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:54 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |   3 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  19 ++-
 .../FileSystemBasedConfigurationProvider.java   |   3 +-
 .../hadoop/yarn/LocalConfigurationProvider.java |   3 +-
 .../api/records/impl/pb/ResourcePBImpl.java |  53 +++---
 .../yarn/util/resource/ResourceUtils.java   | 168 +++
 .../yarn/util/resource/TestResourceUtils.java   |  29 +++-
 .../resource-types/node-resources-1.xml |  29 
 .../resource-types/node-resources-2.xml |  39 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../util/NodeManagerHardwareUtils.java  |  52 ++
 .../resourcemanager/ResourceTrackerService.java |   9 +-
 12 files changed, 342 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c94e758/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index ee8ef03..c9c6a7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -308,7 +308,8 @@ public abstract class Resource implements 
Comparable {
 continue;
   }
   if (entry.getKey().equals(ResourceInformation.VCORES.getName())
-  && entry.getValue().getUnits().equals("")) {
+  && entry.getValue().getUnits()
+  .equals(ResourceInformation.VCORES.getUnits())) {
 continue;
   }
   sb.append(", ").append(entry.getKey()).append(": ")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c94e758/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d352512..48e6a3a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -65,6 +65,10 @@ public class YarnConfiguration extends Configuration {
   "resource-types.xml";
 
   @Private
+  public static final String NODE_RESOURCES_CONFIGURATION_FILE =
+  "node-resources.xml";
+
+  @Private
   public static final List RM_CONFIGURATION_FILES =
   Collections.unmodifiableList(Arrays.asList(
   RESOURCE_TYPES_CONFIGURATION_FILE,
@@ -74,6 +78,16 @@ public class YarnConfiguration extends Configuration {
   YARN_SITE_CONFIGURATION_FILE,
   CORE_SITE_CONFIGURATION_FILE));
 
+  @Private
+  public static final List NM_CONFIGURATION_FILES =
+  Collections.unmodifiableList(Arrays.asList(
+  NODE_RESOURCES_CONFIGURATION_FILE,
+  DR_CONFIGURATION_FILE,
+  CS_CONFIGURATION_FILE,
+  HADOOP_POLICY_CONFIGURATION_FILE,
+  YARN_SITE_CONFIGURATION_FILE,
+  CORE_SITE_CONFIGURATION_FILE));
+
   @Evolving
   public static final int APPLICATION_MAX_TAGS = 10;
 
@@ -112,12 +126,15 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_PREFIX = "yarn.";
 
   /
-  // Scheduler resource types configs
+  // Resource types configs
   
 
   public static final String RESOURCE_TYPES =
   YarnConfiguration.YARN_PREFIX + "resource-types";
 
+  public static final String NM_RESOURCES_PREFIX =
+  YarnConfiguration.NM_PREFIX + "resource-type.";
+
   /** Delay before deleting resource to ease debugging of NM issues */
 

[18/45] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-08-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64439e4c/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
new file mode 100644
index 000..63fbc9d
--- /dev/null
+++ b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
@@ -0,0 +1,283 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
+
+The binary distribution of this product bundles binaries of
+org.iq80.leveldb:leveldb-api (https://github.com/dain/leveldb), which has the
+following notices:
+* Copyright 2011 Dain Sundstrom 
+* Copyright 2011 FuseSource Corp. http://fusesource.com
+
+The binary distribution of this product bundles binaries of
+org.fusesource.hawtjni:hawtjni-runtime (https://github.com/fusesource/hawtjni),
+which has the following notices:
+* This product includes software developed by FuseSource Corp.
+  http://fusesource.com
+* This product includes software developed at
+  Progress Software Corporation and/or its  subsidiaries or affiliates.
+* This product includes software developed by IBM Corporation and others.
+
+The binary distribution of this product bundles binaries of
+AWS Java SDK 1.10.6,
+which has the following notices:
+ * This software includes third party software subject to the following
+ copyrights: - XML parsing and utility functions from JetS3t - Copyright
+ 2006-2009 James Murty. - JSON parsing and utility functions from JSON.org -
+ Copyright 2002 JSON.org. - PKCS#1 PEM encoded private key parsing and utility
+ functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc.
+
+The binary distribution of this product bundles binaries of
+Gson 2.2.4,
+which has the following notices:
+
+The Netty Project
+=
+
+Please visit the Netty web site for more information:
+
+  * http://netty.io/
+
+Copyright 2014 The Netty Project
+
+The Netty Project licenses this file to you under the Apache License,
+version 2.0 (the "License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at:
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+Also, please refer to each LICENSE..txt file, which is located in
+the 'license' directory of the distribution file, for the license terms of the
+components that this product depends on.
+
+---
+This product contains the extensions to Java Collections Framework which has
+been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+  * LICENSE:
+* license/LICENSE.jsr166y.txt (Public Domain)
+  * HOMEPAGE:
+* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+* 
http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+This product contains a modified version of Robert Harder's Public Domain
+Base64 Encoder and Decoder, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.base64.txt (Public Domain)
+  * HOMEPAGE:
+* http://iharder.sourceforge.net/current/java/base64/
+
+This product contains a modified portion of 'Webbit', an event based
+WebSocket and HTTP server, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.webbit.txt (BSD License)
+  * HOMEPAGE:
+* https://github.com/joewalnes/webbit
+
+This product contains a modified portion of 'SLF4J', a simple logging
+facade for Java, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.slf4j.txt (MIT License)
+  * HOMEPAGE:
+* http://www.slf4j.org/
+
+This product contains a modified portion of 'ArrayDeque', written by Josh
+Bloch of Google, Inc:
+
+  * LICENSE:
+* license/LICENSE.deque.txt (Public Domain)
+
+This product contains a modified portion of 'Apache Harmony', an open source
+Java SE, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.harmony.txt (Apache License 2.0)
+  * HOMEPAGE:
+* http://archive.apache.org/dist/harmony/
+
+This product contains a modified version of Roland Kuhn's ASL2
+AbstractNodeQueue, which is based on Dmitriy Vyukov's non-intrusive MPSC queue.
+It can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.abstractnodequeue.txt (Public Domain)
+  * HOMEPAGE:
+* 
https://github.com/akka/akka/blob/wip-2.2.3-for-scala-2.11/akka-actor/src/main/java/akka/dispatch/AbstractNodeQueue.java
+
+This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+and decompression library 

[42/45] hadoop git commit: YARN-6610. [YARN-3926] DominantResourceCalculator#getResourceAsValue dominant param is updated to handle multiple resources. Contributed by Daniel Templeton.

2017-08-21 Thread wangda
YARN-6610. [YARN-3926] DominantResourceCalculator#getResourceAsValue dominant 
param is updated to handle multiple resources. Contributed by Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a77d6d4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a77d6d4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a77d6d4e

Branch: refs/heads/YARN-3926
Commit: a77d6d4e4d0846b34e3eb1cb51baa9516650aa9c
Parents: a39181f
Author: Sunil G 
Authored: Thu Aug 17 18:32:53 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../resource/DominantResourceCalculator.java| 275 ---
 .../util/resource/TestResourceCalculator.java   | 226 ---
 2 files changed, 429 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a77d6d4e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 5992ba3..40b38b9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -17,12 +17,18 @@
 */
 package org.apache.hadoop.yarn.util.resource;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Arrays;
 
 /**
  * A {@link ResourceCalculator} which uses the concept of
@@ -48,6 +54,7 @@ import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 @Private
 @Unstable
 public class DominantResourceCalculator extends ResourceCalculator {
+  static final Log LOG = LogFactory.getLog(DominantResourceCalculator.class);
 
   public DominantResourceCalculator() {
   }
@@ -92,7 +99,6 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   @Override
   public int compare(Resource clusterResource, Resource lhs, Resource rhs,
   boolean singleType) {
-
 if (lhs.equals(rhs)) {
   return 0;
 }
@@ -101,55 +107,232 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   return this.compare(lhs, rhs);
 }
 
-float l = getResourceAsValue(clusterResource, lhs, true);
-float r = getResourceAsValue(clusterResource, rhs, true);
+// We have to calculate the shares for all resource types for both
+// resources and then look for which resource has the biggest
+// share overall.
+ResourceInformation[] clusterRes = clusterResource.getResources();
+// If array creation shows up as a time sink, these arrays could be cached
+// because they're always the same length.
+double[] lhsShares = new double[clusterRes.length];
+double[] rhsShares = new double[clusterRes.length];
+double diff;
+
+try {
+  if (singleType) {
+double[] max = new double[2];
+
+calculateShares(clusterRes, lhs, rhs, lhsShares, rhsShares, max);
+
+diff = max[0] - max[1];
+  } else if (clusterRes.length == 2) {
+// Special case to handle the common scenario of only CPU and memory
+// so that we can optimize for performance
+diff = calculateSharesForMandatoryResources(clusterRes, lhs, rhs,
+lhsShares, rhsShares);
+  } else {
+calculateShares(clusterRes, lhs, rhs, lhsShares, rhsShares);
+
+Arrays.sort(lhsShares);
+Arrays.sort(rhsShares);
+
+diff = compareShares(lhsShares, rhsShares);
+  }
+} catch (ArrayIndexOutOfBoundsException ex) {
+  StringWriter out = new StringWriter(); // No need to close a StringWriter
+  ex.printStackTrace(new PrintWriter(out));
+
+  LOG.error("A problem was encountered while calculating resource "
+  + "availability that should not occur under normal circumstances. "
+  + "Please report this error to the Hadoop community by opening a "
+  

[21/45] hadoop git commit: YARN-4715. Add support to read resource types from a config file. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-4715. Add support to read resource types from a config file. Contributed 
by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf68c1a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf68c1a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf68c1a1

Branch: refs/heads/YARN-3926
Commit: cf68c1a183c854b000e5176b689d23cd9af5f7d3
Parents: 88026b2
Author: Varun Vasudev 
Authored: Fri Mar 11 15:03:15 2016 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:54 2017 -0700

--
 .../hadoop/yarn/api/records/Resource.java   |  24 +-
 .../yarn/api/records/ResourceInformation.java   |   8 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../exceptions/ResourceNotFoundException.java   |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   8 +
 .../api/records/impl/pb/ResourcePBImpl.java |  72 ++
 .../resource/DominantResourceCalculator.java|   5 +-
 .../yarn/util/resource/ResourceUtils.java   | 229 +
 .../hadoop/yarn/util/resource/Resources.java|  18 +-
 .../src/main/resources/yarn-default.xml |  10 +
 .../yarn/util/resource/TestResourceUtils.java   | 248 +++
 .../resource-types/resource-types-1.xml |  18 ++
 .../resource-types/resource-types-2.xml |  29 +++
 .../resource-types/resource-types-3.xml |  24 ++
 .../resource-types/resource-types-4.xml |  34 +++
 .../resource-types/resource-types-error-1.xml   |  29 +++
 .../resource-types/resource-types-error-2.xml   |  29 +++
 .../resource-types/resource-types-error-3.xml   |  29 +++
 .../resource-types/resource-types-error-4.xml   |  24 ++
 19 files changed, 762 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf68c1a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 2371b13..ee8ef03 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -101,15 +101,6 @@ public abstract class Resource implements 
Comparable {
 return new SimpleResource(memory, vCores);
   }
 
-  @Public
-  @Stable
-  public static Resource newInstance(
-  Map resources) {
-Resource resource = Records.newRecord(Resource.class);
-resource.setResources(resources);
-return resource;
-  }
-
   /**
* This method is DEPRECATED:
* Use {@link Resource#getMemorySize()} instead
@@ -234,15 +225,6 @@ public abstract class Resource implements 
Comparable {
   public abstract Long getResourceValue(String resource) throws YarnException;
 
   /**
-   * Set the resources to the map specified.
-   *
-   * @param resources Desired resources
-   */
-  @Public
-  @Evolving
-  public abstract void setResources(Map 
resources);
-
-  /**
* Set the ResourceInformation object for a particular resource.
*
* @param resource the resource for which the ResourceInformation is provided
@@ -276,8 +258,8 @@ public abstract class Resource implements 
Comparable {
 result = prime * result + getVirtualCores();
 for (Map.Entry entry : getResources()
 .entrySet()) {
-  if (entry.getKey().equals(ResourceInformation.MEMORY.getName()) || entry
-  .getKey().equals(ResourceInformation.VCORES.getName())) {
+  if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+  || entry.getKey().equals(ResourceInformation.VCORES.getName())) {
 continue;
   }
   result = prime * result + entry.getValue().hashCode();
@@ -320,7 +302,7 @@ public abstract class Resource implements 
Comparable {
 .append(getVirtualCores());
 for (Map.Entry entry : getResources()
 .entrySet()) {
-  if (entry.getKey().equals(ResourceInformation.MEMORY.getName())
+  if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
   && entry.getValue().getUnits()
   .equals(ResourceInformation.MEMORY_MB.getUnits())) {
 continue;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf68c1a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java

[40/45] hadoop git commit: YARN-6908. ResourceProfilesManagerImpl is missing @Overrides on methods (Contributed by Sunil G. via Daniel Templeton)

2017-08-21 Thread wangda
YARN-6908. ResourceProfilesManagerImpl is missing @Overrides on methods
(Contributed by Sunil G. via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a39181f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a39181f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a39181f6

Branch: refs/heads/YARN-3926
Commit: a39181f640de03e6b1a55edd3cffdf10c0b470a7
Parents: d5e93e1
Author: Daniel Templeton 
Authored: Wed Aug 16 09:41:52 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../resource/ResourceProfilesManager.java   | 34 
 .../resource/ResourceProfilesManagerImpl.java   |  7 
 2 files changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a39181f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
index af54f05..c330e25 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resource;
 
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Resource;
 
@@ -28,19 +30,51 @@ import java.util.Map;
  * Interface for the resource profiles manager. Provides an interface to get
  * the list of available profiles and some helper functions.
  */
+@Public
+@Unstable
 public interface ResourceProfilesManager {
 
+  /**
+   * Method to handle all initialization steps for ResourceProfilesManager.
+   * @param config Configuration object
+   * @throws IOException when invalid resource profile names are loaded
+   */
   void init(Configuration config) throws IOException;
 
+  /**
+   * Get the resource capability associated with given profile name.
+   * @param profile name of resource profile
+   * @return resource capability for given profile
+   */
   Resource getProfile(String profile);
 
+  /**
+   * Get all supported resource profiles.
+   * @return a map of resource objects associated with each profile
+   */
   Map getResourceProfiles();
 
+  /**
+   * Reload profiles based on updated configuration.
+   * @throws IOException when invalid resource profile names are loaded
+   */
   void reloadProfiles() throws IOException;
 
+  /**
+   * Get default supported resource profile.
+   * @return resource object which is default
+   */
   Resource getDefaultProfile();
 
+  /**
+   * Get minimum supported resource profile.
+   * @return resource object which is minimum
+   */
   Resource getMinimumProfile();
 
+  /**
+   * Get maximum supported resource profile.
+   * @return resource object which is maximum
+   */
   Resource getMaximumProfile();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a39181f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
index b5ab384..42d38b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
+++ 

[45/45] hadoop git commit: YARN-6788. [YARN-3926] Improve performance of resource profile branch (Contributed by Sunil Govindan via Daniel Templeton)

2017-08-21 Thread wangda
YARN-6788. [YARN-3926] Improve performance of resource profile branch
(Contributed by Sunil Govindan via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f76e25d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f76e25d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f76e25d

Branch: refs/heads/YARN-3926
Commit: 0f76e25d9a258066c6d8c55dc0315befece34f41
Parents: f7a9c80
Author: Daniel Templeton 
Authored: Fri Aug 4 08:42:34 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|  18 +
 .../yarn/api/records/ProfileCapability.java |   8 +-
 .../hadoop/yarn/api/records/Resource.java   | 234 
 .../yarn/api/records/ResourceInformation.java   |  13 +-
 .../yarn/api/records/impl/BaseResource.java | 133 +
 .../yarn/api/records/impl/package-info.java |  22 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |   8 +-
 .../yarn/util/resource/ResourceUtils.java   | 534 +++
 .../hadoop/yarn/util/resource/package-info.java |  22 +
 .../yarn/client/api/impl/TestAMRMClient.java|   8 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java|   5 +-
 .../api/records/impl/pb/ResourcePBImpl.java | 110 ++--
 .../resource/DominantResourceCalculator.java|  67 ++-
 .../yarn/util/resource/ResourceUtils.java   | 488 -
 .../hadoop/yarn/util/resource/Resources.java| 194 ---
 .../yarn/util/resource/TestResourceUtils.java   |  14 +-
 .../yarn/util/resource/TestResources.java   |   7 +-
 .../resource/ResourceProfilesManagerImpl.java   |   8 +-
 .../rmapp/attempt/RMAppAttemptMetrics.java  |  11 +-
 .../scheduler/SchedulerApplicationAttempt.java  |   9 +-
 .../webapp/dao/SchedulerInfo.java   |   3 +-
 .../server/resourcemanager/TestAppManager.java  |   1 +
 22 files changed, 1045 insertions(+), 872 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f76e25d/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index ce7a9c6..a5b4021 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -603,4 +603,22 @@
 
   
 
+  
+
+
+
+
+  
+
+  
+
+
+
+  
+
+  
+
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f76e25d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
index 1a8d1c3..2cb4670 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
@@ -162,10 +162,10 @@ public abstract class ProfileCapability {
 
 if (capability.getProfileCapabilityOverride() != null &&
 !capability.getProfileCapabilityOverride().equals(none)) {
-  for (Map.Entry entry : capability
-  .getProfileCapabilityOverride().getResources().entrySet()) {
-if (entry.getValue() != null && entry.getValue().getValue() >= 0) {
-  resource.setResourceInformation(entry.getKey(), entry.getValue());
+  for (ResourceInformation entry : capability
+  .getProfileCapabilityOverride().getResources()) {
+if (entry != null && entry.getValue() >= 0) {
+  resource.setResourceInformation(entry.getName(), entry);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f76e25d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 9a8e2ec..a485a57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 

[02/45] hadoop git commit: HDFS-12072. Provide fairness between EC and non-EC recovery tasks. Contributed by Eddy Xu.

2017-08-21 Thread wangda
HDFS-12072. Provide fairness between EC and non-EC recovery tasks. Contributed 
by Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2989488
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2989488
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2989488

Branch: refs/heads/YARN-3926
Commit: b29894889742dda654cd88a7ce72a4e51fccb328
Parents: ab1a8ae
Author: Andrew Wang 
Authored: Thu Aug 17 15:26:11 2017 -0700
Committer: Andrew Wang 
Committed: Thu Aug 17 15:26:11 2017 -0700

--
 .../blockmanagement/DatanodeDescriptor.java |  6 +-
 .../server/blockmanagement/DatanodeManager.java | 45 ++---
 .../blockmanagement/TestDatanodeManager.java| 96 +++-
 3 files changed, 108 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2989488/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 2bd4a20..d35894c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -661,7 +661,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
 return erasurecodeBlocks.size();
   }
 
-  public List getReplicationCommand(int maxTransfers) {
+  int getNumberOfReplicateBlocks() {
+return replicateBlocks.size();
+  }
+
+  List getReplicationCommand(int maxTransfers) {
 return replicateBlocks.poll(maxTransfers);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2989488/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 78783ca..c75bcea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1663,21 +1663,38 @@ public class DatanodeManager {
 }
 
 final List cmds = new ArrayList<>();
-// check pending replication
-List pendingList = nodeinfo.getReplicationCommand(
-maxTransfers);
-if (pendingList != null) {
-  cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
-  pendingList));
-  maxTransfers -= pendingList.size();
-}
-// check pending erasure coding tasks
-List pendingECList = nodeinfo
-.getErasureCodeCommand(maxTransfers);
-if (pendingECList != null) {
-  cmds.add(new BlockECReconstructionCommand(
-  DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList));
+// Allocate _approximately_ maxTransfers pending tasks to DataNode.
+// NN chooses pending tasks based on the ratio between the lengths of
+// replication and erasure-coded block queues.
+int totalReplicateBlocks = nodeinfo.getNumberOfReplicateBlocks();
+int totalECBlocks = nodeinfo.getNumberOfBlocksToBeErasureCoded();
+int totalBlocks = totalReplicateBlocks + totalECBlocks;
+if (totalBlocks > 0) {
+  int numReplicationTasks = (int) Math.ceil(
+  (double) (totalReplicateBlocks * maxTransfers) / totalBlocks);
+  int numECTasks = (int) Math.ceil(
+  (double) (totalECBlocks * maxTransfers) / totalBlocks);
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Pending replication tasks: " + numReplicationTasks
++ " erasure-coded tasks: " + numECTasks);
+  }
+  // check pending replication tasks
+  List pendingList = nodeinfo.getReplicationCommand(
+  numReplicationTasks);
+  if (pendingList != null && !pendingList.isEmpty()) {
+cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
+pendingList));
+  }
+  // check pending erasure coding tasks
+  List pendingECList = nodeinfo
+  .getErasureCodeCommand(numECTasks);
+  if (pendingECList != null && !pendingECList.isEmpty()) {
+cmds.add(new BlockECReconstructionCommand(
+

[25/45] hadoop git commit: YARN-5587. Add support for resource profiles. (vvasudev via asuresh)

2017-08-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e1ed84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
index 1a70933..032bbc3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
@@ -33,6 +33,8 @@ import 
org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfilesProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfileEntry;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
@@ -59,6 +61,7 @@ public class RegisterApplicationMasterResponsePBImpl extends
   private List<Container> containersFromPreviousAttempts = null;
   private List<NMToken> nmTokens = null;
   private EnumSet<SchedulerResourceTypes> schedulerResourceTypes = null;
+  private Map<String, Resource> profiles = null;
 
   public RegisterApplicationMasterResponsePBImpl() {
 builder = RegisterApplicationMasterResponseProto.newBuilder();
@@ -123,6 +126,9 @@ public class RegisterApplicationMasterResponsePBImpl extends
 if(schedulerResourceTypes != null) {
   addSchedulerResourceTypes();
 }
+if (profiles != null) {
+  addResourceProfiles();
+}
   }
 
 
@@ -433,6 +439,58 @@ public class RegisterApplicationMasterResponsePBImpl 
extends
 this.schedulerResourceTypes.addAll(types);
   }
 
+  private void addResourceProfiles() {
+maybeInitBuilder();
+builder.clearResourceProfiles();
+if (profiles == null) {
+  return;
+}
+ResourceProfilesProto.Builder profilesBuilder =
+ResourceProfilesProto.newBuilder();
+for (Map.Entry<String, Resource> entry : profiles.entrySet()) {
+  ResourceProfileEntry.Builder entryBuilder =
+  ResourceProfileEntry.newBuilder();
+  entryBuilder.setName(entry.getKey());
+  entryBuilder.setResources(convertToProtoFormat(entry.getValue()));
+  profilesBuilder.addResourceProfilesMap(entryBuilder.build());
+}
+builder.setResourceProfiles(profilesBuilder.build());
+  }
+
+  private void initResourceProfiles() {
+if (this.profiles != null) {
+  return;
+}
+this.profiles = new HashMap<>();
+RegisterApplicationMasterResponseProtoOrBuilder p =
+viaProto ? proto : builder;
+
+if (p.hasResourceProfiles()) {
+  ResourceProfilesProto profilesProto = p.getResourceProfiles();
+  for (ResourceProfileEntry entry : profilesProto
+  .getResourceProfilesMapList()) {
+this.profiles
+.put(entry.getName(), 
convertFromProtoFormat(entry.getResources()));
+  }
+}
+  }
+
+  @Override
+  public Map<String, Resource> getResourceProfiles() {
+initResourceProfiles();
+return this.profiles;
+  }
+
+  @Override
+  public void setResourceProfiles(Map<String, Resource> profilesMap) {
+if (profilesMap == null) {
+  return;
+}
+initResourceProfiles();
+this.profiles.clear();
+this.profiles.putAll(profilesMap);
+  }
+
   private Resource convertFromProtoFormat(ResourceProto resource) {
 return new ResourcePBImpl(resource);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9e1ed84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 63b466b..955ea52 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -144,8 +144,8 @@ public class 

[29/45] hadoop git commit: YARN-5708. Implement APIs to get resource profiles from the RM. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-5708. Implement APIs to get resource profiles from the RM. Contributed by 
Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f01f86c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f01f86c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f01f86c8

Branch: refs/heads/YARN-3926
Commit: f01f86c8a08e6866c3bb2aadb8bb7a7e5f662822
Parents: 9291d6d
Author: Varun Vasudev 
Authored: Sat Oct 22 20:15:47 2016 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:52:55 2017 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  13 ++
 .../hadoop/mapred/TestClientRedirect.java   |  17 +++
 .../yarn/api/ApplicationClientProtocol.java |  37 +
 .../GetAllResourceProfilesRequest.java  |  35 +
 .../GetAllResourceProfilesResponse.java |  60 
 .../GetResourceProfileRequest.java  |  59 
 .../GetResourceProfileResponse.java |  68 +
 .../yarn/api/records/ProfileCapability.java |  88 
 .../main/proto/applicationclient_protocol.proto |   2 +
 .../src/main/proto/yarn_protos.proto|  15 ++
 .../src/main/proto/yarn_service_protos.proto|  16 +++
 .../hadoop/yarn/client/api/YarnClient.java  |  25 
 .../yarn/client/api/impl/YarnClientImpl.java|  19 +++
 .../ApplicationClientProtocolPBClientImpl.java  |  36 +
 .../ApplicationClientProtocolPBServiceImpl.java |  42 ++
 .../pb/GetAllResourceProfilesRequestPBImpl.java |  55 +++
 .../GetAllResourceProfilesResponsePBImpl.java   | 142 +++
 .../pb/GetResourceProfileRequestPBImpl.java | 101 +
 .../pb/GetResourceProfileResponsePBImpl.java| 112 +++
 .../impl/pb/ProfileCapabilityPBImpl.java| 134 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |  34 +
 .../yarn/server/MockResourceManagerFacade.java  |  16 +++
 .../server/resourcemanager/ClientRMService.java |  41 ++
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 ++
 .../server/resourcemanager/ResourceManager.java |   9 ++
 26 files changed, 1193 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f01f86c8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 62aa497..a365f80 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -517,4 +518,16 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 client.killApplication(appId, diagnostics);
   }
+
+  @Override
+  public Map<String, Resource> getResourceProfiles()
+  throws YarnException, IOException {
+return client.getResourceProfiles();
+  }
+
+  @Override
+  public Resource getResourceProfile(String profile)
+  throws YarnException, IOException {
+return client.getResourceProfile(profile);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f01f86c8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 65eac65..cc50be0 100644
--- 

[01/45] hadoop git commit: YARN-6988. container-executor fails for docker when command length > 4096 B. Contributed by Eric Badger [Forced Update!]

2017-08-21 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3926 115af40dc -> f461a2d42 (forced update)


YARN-6988. container-executor fails for docker when command length > 4096 B. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab1a8ae8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab1a8ae8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab1a8ae8

Branch: refs/heads/YARN-3926
Commit: ab1a8ae85f8c61304a0f437cdc61cc5aeda36a4b
Parents: dd7916d
Author: Jason Lowe 
Authored: Thu Aug 17 15:50:14 2017 -0500
Committer: Jason Lowe 
Committed: Thu Aug 17 15:50:14 2017 -0500

--
 .../impl/container-executor.c   | 38 +---
 .../main/native/container-executor/impl/util.h  |  7 
 2 files changed, 33 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab1a8ae8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 9f754c4..7361808 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1417,9 +1417,10 @@ int run_docker(const char *command_file) {
   char* docker_command = parse_docker_command_file(command_file);
   char* docker_binary = get_section_value(DOCKER_BINARY_KEY, _cfg);
   docker_binary = check_docker_binary(docker_binary);
+  size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
 
-  char* docker_command_with_binary = calloc(sizeof(char), EXECUTOR_PATH_MAX);
-  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", 
docker_binary, docker_command);
+  char* docker_command_with_binary = calloc(sizeof(char), command_size);
+  snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, 
docker_command);
   char **args = split_delimiter(docker_command_with_binary, " ");
 
   int exit_code = -1;
@@ -1567,16 +1568,24 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
   char *script_file_dest = NULL;
   char *cred_file_dest = NULL;
   char *exit_code_file = NULL;
-  char docker_command_with_binary[EXECUTOR_PATH_MAX];
-  char docker_wait_command[EXECUTOR_PATH_MAX];
-  char docker_logs_command[EXECUTOR_PATH_MAX];
-  char docker_inspect_command[EXECUTOR_PATH_MAX];
-  char docker_rm_command[EXECUTOR_PATH_MAX];
+  char *docker_command_with_binary = NULL;
+  char *docker_wait_command = NULL;
+  char *docker_logs_command = NULL;
+  char *docker_inspect_command = NULL;
+  char *docker_rm_command = NULL;
   int container_file_source =-1;
   int cred_file_source = -1;
   int BUFFER_SIZE = 4096;
   char buffer[BUFFER_SIZE];
 
+  size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
+
+  docker_command_with_binary = calloc(sizeof(char), command_size);
+  docker_wait_command = calloc(sizeof(char), command_size);
+  docker_logs_command = calloc(sizeof(char), command_size);
+  docker_inspect_command = calloc(sizeof(char), command_size);
+  docker_rm_command = calloc(sizeof(char), command_size);
+
   gid_t user_gid = getegid();
   uid_t prev_uid = geteuid();
 
@@ -1621,7 +1630,7 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
 goto cleanup;
   }
 
-  snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", 
docker_binary, docker_command);
+  snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, 
docker_command);
 
   fprintf(LOGFILE, "Launching docker container...\n");
   FILE* start_docker = popen(docker_command_with_binary, "r");
@@ -1634,7 +1643,7 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
 goto cleanup;
   }
 
-  snprintf(docker_inspect_command, EXECUTOR_PATH_MAX,
+  snprintf(docker_inspect_command, command_size,
 "%s inspect --format {{.State.Pid}} %s",
 docker_binary, container_id);
 
@@ -1679,7 +1688,7 @@ int launch_docker_container_as_user(const char * user, 
const char *app_id,
   goto cleanup;
 }
 
-snprintf(docker_wait_command, EXECUTOR_PATH_MAX,
+snprintf(docker_wait_command, command_size,
   "%s wait %s", docker_binary, container_id);
 
 fprintf(LOGFILE, 

[15/45] hadoop git commit: HDFS-11738. Hedged pread takes more time when block moved from initial locations. Contributed by Vinayakumar B.

2017-08-21 Thread wangda
HDFS-11738. Hedged pread takes more time when block moved from initial 
locations. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6bfb2fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6bfb2fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6bfb2fc

Branch: refs/heads/YARN-3926
Commit: b6bfb2fcb2391d51b8de97c01c1290880779132e
Parents: 736ceab
Author: John Zhuge 
Authored: Mon Aug 21 13:44:32 2017 -0700
Committer: John Zhuge 
Committed: Mon Aug 21 13:45:30 2017 -0700

--
 .../hadoop/hdfs/DFSClientFaultInjector.java |   2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 145 +++
 .../java/org/apache/hadoop/hdfs/TestPread.java  |  26 +++-
 3 files changed, 112 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6bfb2fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index 748edcd..b58cf16 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -61,4 +61,6 @@ public class DFSClientFaultInjector {
   public boolean skipRollingRestartWait() {
 return false;
   }
+
+  public void sleepBeforeHedgedGet() {}
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6bfb2fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6bff172..97d3de4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -830,60 +830,85 @@ public class DFSInputStream extends FSInputStream
 
   private DNAddrPair chooseDataNode(LocatedBlock block,
   Collection<DatanodeInfo> ignoredNodes) throws IOException {
+return chooseDataNode(block, ignoredNodes, true);
+  }
+
+  /**
+   * Choose datanode to read from.
+   *
+   * @param block Block to choose datanode addr from
+   * @param ignoredNodes  Ignored nodes inside.
+   * @param refetchIfRequired Whether to refetch if no nodes to chose
+   *  from.
+   * @return Returns chosen DNAddrPair; Can be null if refetchIfRequired is
+   * false.
+   */
+  private DNAddrPair chooseDataNode(LocatedBlock block,
+  Collection<DatanodeInfo> ignoredNodes, boolean refetchIfRequired)
+  throws IOException {
+  throws IOException {
 while (true) {
   DNAddrPair result = getBestNodeDNAddrPair(block, ignoredNodes);
   if (result != null) {
 return result;
+  } else if (refetchIfRequired) {
+block = refetchLocations(block, ignoredNodes);
   } else {
-String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
-deadNodes, ignoredNodes);
-String blockInfo = block.getBlock() + " file=" + src;
-if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
-  String description = "Could not obtain block: " + blockInfo;
-  DFSClient.LOG.warn(description + errMsg
-  + ". Throwing a BlockMissingException");
-  throw new BlockMissingException(src, description,
-  block.getStartOffset());
-}
-
-DatanodeInfo[] nodes = block.getLocations();
-if (nodes == null || nodes.length == 0) {
-  DFSClient.LOG.info("No node available for " + blockInfo);
-}
-DFSClient.LOG.info("Could not obtain " + block.getBlock()
-+ " from any node: " + errMsg
-+ ". Will get new block locations from namenode and retry...");
-try {
-  // Introducing a random factor to the wait time before another retry.
-  // The wait time is dependent on # of failures and a random factor.
-  // At the first time of getting a BlockMissingException, the wait 
time
-  // is a random number between 0..3000 ms. If the first retry
-  // still fails, we will wait 3000 ms grace period before the 2nd 
retry.
-  // Also at the second retry, the waiting 

[31/45] hadoop git commit: YARN-6232. Update resource usage and preempted resource calculations to take into account all resource types. Contributed by Varun Vasudev.

2017-08-21 Thread wangda
YARN-6232. Update resource usage and preempted resource calculations to take 
into account all resource types. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fb5a99d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fb5a99d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fb5a99d

Branch: refs/heads/YARN-3926
Commit: 5fb5a99dc59c451575ca43a7880c7dfe9916ff62
Parents: b680025
Author: Sunil G 
Authored: Mon Mar 6 11:34:20 2017 +0530
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:01 2017 -0700

--
 .../records/ApplicationResourceUsageReport.java |  58 ++-
 .../src/main/proto/yarn_protos.proto|   7 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  35 +++--
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  16 +-
 .../ApplicationResourceUsageReportPBImpl.java   | 151 ---
 .../yarn/api/records/impl/pb/ProtoUtils.java|  34 +
 .../apache/hadoop/yarn/util/StringHelper.java   |  36 +
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  12 ++
 .../hadoop/yarn/api/TestPBImplRecords.java  |   4 +
 ...pplicationHistoryManagerOnTimelineStore.java |  18 ++-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  16 +-
 .../server/resourcemanager/RMAppManager.java|   8 +-
 .../server/resourcemanager/RMServerUtils.java   |  10 +-
 .../resourcemanager/recovery/RMStateStore.java  |   7 +-
 .../records/ApplicationAttemptStateData.java|  89 +--
 .../pb/ApplicationAttemptStateDataPBImpl.java   |  50 ++
 .../server/resourcemanager/rmapp/RMAppImpl.java |  43 +++---
 .../resourcemanager/rmapp/RMAppMetrics.java |  41 +++--
 .../attempt/AggregateAppResourceUsage.java  |  34 ++---
 .../rmapp/attempt/RMAppAttemptImpl.java |  32 ++--
 .../rmapp/attempt/RMAppAttemptMetrics.java  | 106 ++---
 .../rmcontainer/RMContainerImpl.java|  16 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  47 +++---
 .../resourcemanager/webapp/RMAppBlock.java  |  14 +-
 .../resourcemanager/webapp/dao/AppInfo.java |  15 +-
 .../webapp/dao/ResourceInfo.java|  36 -
 .../webapp/dao/SchedulerInfo.java   |   2 +-
 .../yarn_server_resourcemanager_recovery.proto  |   2 +
 .../server/resourcemanager/TestAppManager.java  |   7 +-
 .../TestContainerResourceUsage.java |   7 +-
 .../applicationsmanager/MockAsm.java|   9 +-
 .../metrics/TestSystemMetricsPublisher.java |  15 +-
 .../TestSystemMetricsPublisherForV2.java|  22 ++-
 .../recovery/RMStateStoreTestBase.java  |   8 +-
 .../recovery/TestZKRMStateStore.java|  23 +--
 .../resourcemanager/webapp/TestAppPage.java |   8 +-
 .../webapp/TestRMWebAppFairScheduler.java   |   5 +-
 .../DefaultClientRequestInterceptor.java|  16 ++
 .../clientrm/FederationClientInterceptor.java   |  15 ++
 .../router/clientrm/RouterClientRMService.java  |  18 +++
 .../PassThroughClientRequestInterceptor.java|  16 ++
 41 files changed, 854 insertions(+), 254 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb5a99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
index 3cf8f3d..f9c8975 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
@@ -24,6 +24,9 @@ import 
org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  * Contains various scheduling metrics to be reported by UI and CLI.
  */
@@ -35,9 +38,9 @@ public abstract class ApplicationResourceUsageReport {
   @Unstable
   public static ApplicationResourceUsageReport newInstance(
   int numUsedContainers, int numReservedContainers, Resource usedResources,
-  Resource reservedResources, Resource neededResources, long memorySeconds,
-  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc,
-  long preemptedMemorySeconds, long preemptedVcoresSeconds) {
+  Resource reservedResources, Resource 

[39/45] hadoop git commit: YARN-6935. [YARN-3926] ResourceProfilesManagerImpl.parseResource() has no need of the key parameter (Contributed by Manikandan R via Daniel Templeton)

2017-08-21 Thread wangda
YARN-6935. [YARN-3926] ResourceProfilesManagerImpl.parseResource() has no need 
of the key parameter
(Contributed by Manikandan R via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/984e9909
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/984e9909
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/984e9909

Branch: refs/heads/YARN-3926
Commit: 984e9909dc9f35451d4273c5f9a74d28fab24881
Parents: 0f76e25
Author: Daniel Templeton 
Authored: Fri Aug 11 16:32:13 2017 -0700
Committer: Wangda Tan 
Committed: Mon Aug 21 16:56:02 2017 -0700

--
 .../resource/ResourceProfilesManagerImpl.java   | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/984e9909/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
index ab6..b5ab384 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
@@ -87,22 +87,22 @@ public class ResourceProfilesManagerImpl implements 
ResourceProfilesManager {
 Iterator iterator = data.entrySet().iterator();
 while (iterator.hasNext()) {
   Map.Entry entry = (Map.Entry) iterator.next();
-  String key = entry.getKey().toString();
-  if (key.isEmpty()) {
+  String profileName = entry.getKey().toString();
+  if (profileName.isEmpty()) {
 throw new IOException(
 "Name of resource profile cannot be an empty string");
   }
   if (entry.getValue() instanceof Map) {
-Map value = (Map) entry.getValue();
+Map profileInfo = (Map) entry.getValue();
 // ensure memory and vcores are specified
-if (!value.containsKey(MEMORY) || !value.containsKey(VCORES)) {
+if (!profileInfo.containsKey(MEMORY) || 
!profileInfo.containsKey(VCORES)) {
   throw new IOException(
-  "Illegal resource profile definition; profile '" + key
+  "Illegal resource profile definition; profile '" + profileName
   + "' must contain '" + MEMORY + "' and '" + VCORES + "'");
 }
-Resource resource = parseResource(key, value);
-profiles.put(key, resource);
-LOG.info("Added profile '" + key + "' with resources " + resource);
+Resource resource = parseResource(profileInfo);
+profiles.put(profileName, resource);
+LOG.info("Added profile '" + profileName + "' with resources " + 
resource);
   }
 }
 // check to make sure mandatory profiles are present
@@ -116,9 +116,9 @@ public class ResourceProfilesManagerImpl implements 
ResourceProfilesManager {
 LOG.info("Loaded profiles " + profiles.keySet());
   }
 
-  private Resource parseResource(String key, Map value) throws IOException {
+  private Resource parseResource(Map profileInfo) throws IOException {
 Resource resource = Resource.newInstance(0, 0);
-Iterator iterator = value.entrySet().iterator();
+Iterator iterator = profileInfo.entrySet().iterator();
 Map resourceTypes = ResourceUtils
 .getResourceTypes();
 while (iterator.hasNext()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/45] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-08-21 Thread wangda
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64439e4c/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
new file mode 100644
index 000..44880df
--- /dev/null
+++ b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,1661 @@
+
+ Apache License
+   Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+  "License" shall mean the terms and conditions for use, reproduction,
+  and distribution as defined by Sections 1 through 9 of this document.
+
+  "Licensor" shall mean the copyright owner or entity authorized by
+  the copyright owner that is granting the License.
+
+  "Legal Entity" shall mean the union of the acting entity and all
+  other entities that control, are controlled by, or are under common
+  control with that entity. For the purposes of this definition,
+  "control" means (i) the power, direct or indirect, to cause the
+  direction or management of such entity, whether by contract or
+  otherwise, or (ii) ownership of fifty percent (50%) or more of the
+  outstanding shares, or (iii) beneficial ownership of such entity.
+
+  "You" (or "Your") shall mean an individual or Legal Entity
+  exercising permissions granted by this License.
+
+  "Source" form shall mean the preferred form for making modifications,
+  including but not limited to software source code, documentation
+  source, and configuration files.
+
+  "Object" form shall mean any form resulting from mechanical
+  transformation or translation of a Source form, including but
+  not limited to compiled object code, generated documentation,
+  and conversions to other media types.
+
+  "Work" shall mean the work of authorship, whether in Source or
+  Object form, made available under the License, as indicated by a
+  copyright notice that is included in or attached to the work
+  (an example is provided in the Appendix below).
+
+  "Derivative Works" shall mean any work, whether in Source or Object
+  form, that is based on (or derived from) the Work and for which the
+  editorial revisions, annotations, elaborations, or other modifications
+  represent, as a whole, an original work of authorship. For the purposes
+  of this License, Derivative Works shall not include works that remain
+  separable from, or merely link (or bind by name) to the interfaces of,
+  the Work and Derivative Works thereof.
+
+  "Contribution" shall mean any work of authorship, including
+  the original version of the Work and any modifications or additions
+  to that Work or Derivative Works thereof, that is intentionally
+  submitted to Licensor for inclusion in the Work by the copyright owner
+  or by an individual or Legal Entity authorized to submit on behalf of
+  the copyright owner. For the purposes of this definition, "submitted"
+  means any form of electronic, verbal, or written communication sent
+  to the Licensor or its representatives, including but not limited to
+  communication on electronic mailing lists, source code control systems,
+  and issue tracking systems that are managed by, or on behalf of, the
+  Licensor for the purpose of discussing and improving the Work, but
+  excluding communication that is conspicuously marked or otherwise
+  designated in writing by the copyright owner as "Not a Contribution."
+
+  "Contributor" shall mean Licensor and any individual or Legal Entity
+  on behalf of whom a Contribution has been received by Licensor and
+  subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  copyright license to reproduce, prepare Derivative Works of,
+  publicly display, publicly perform, sublicense, and distribute the
+  Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  (except as stated in this section) patent license to make, have made,
+  use, offer to sell, sell, import, and otherwise transfer the Work,
+  where such license applies only to those patent claims licensable
+  by such Contributor that are necessarily infringed by their
+  Contribution(s) alone or by 

hadoop git commit: HDFS-11482. Add storage type demand to into DFSNetworkTopology#chooseRandom. Contributed by Chen Liang.

2017-08-21 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 89fc7fe67 -> b3ea11dfd


HDFS-11482. Add storage type demand to into DFSNetworkTopology#chooseRandom. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3ea11df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3ea11df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3ea11df

Branch: refs/heads/branch-2
Commit: b3ea11dfdb46fcec86118a132bee9a9978df21dd
Parents: 89fc7fe
Author: Arpit Agarwal 
Authored: Mon Aug 21 14:07:59 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 21 14:07:59 2017 -0700

--
 .../org/apache/hadoop/net/InnerNodeImpl.java|   8 +-
 .../net/NetworkTopologyWithNodeGroup.java   |   2 +-
 .../hadoop/hdfs/net/DFSNetworkTopology.java | 289 
 .../hadoop/hdfs/net/DFSTopologyNodeImpl.java| 277 
 .../blockmanagement/DatanodeDescriptor.java |  10 +
 .../apache/hadoop/hdfs/DFSNetworkTopology.java  |  36 --
 .../apache/hadoop/hdfs/DFSTopologyNodeImpl.java | 255 ---
 .../hadoop/hdfs/TestDFSNetworkTopology.java | 260 ---
 .../hadoop/hdfs/net/TestDFSNetworkTopology.java | 449 +++
 9 files changed, 1030 insertions(+), 556 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3ea11df/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
index 81eaf7f..5a2931b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
@@ -63,7 +63,7 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
   /** Judge if this node represents a rack
* @return true if it has no child or its children are not InnerNodes
*/
-  boolean isRack() {
+  public boolean isRack() {
 if (children.isEmpty()) {
   return true;
 }
@@ -81,7 +81,7 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
* @param n a node
* @return true if this node is an ancestor of n
*/
-  protected boolean isAncestor(Node n) {
+  public boolean isAncestor(Node n) {
 return getPath(this).equals(NodeBase.PATH_SEPARATOR_STR) ||
   (n.getNetworkLocation()+NodeBase.PATH_SEPARATOR_STR).
   startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR);
@@ -92,12 +92,12 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
* @param n a node
* @return true if this node is the parent of n
*/
-  protected boolean isParent(Node n) {
+  public boolean isParent(Node n) {
 return n.getNetworkLocation().equals(getPath(this));
   }
 
   /* Return a child name of this node who is an ancestor of node n */
-  protected String getNextAncestorName(Node n) {
+  public String getNextAncestorName(Node n) {
 if (!isAncestor(n)) {
   throw new IllegalArgumentException(
  this + "is not an ancestor of " + n);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3ea11df/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
index a20d5fc..bec0fe1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
@@ -308,7 +308,7 @@ public class NetworkTopologyWithNodeGroup extends 
NetworkTopology {
 }
 
 @Override
-boolean isRack() {
+public boolean isRack() {
   // it is node group
   if (getChildren().isEmpty()) {
 return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3ea11df/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
new file mode 100644
index 000..ee83dba
--- 

  1   2   >