[4/6] hadoop git commit: HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading to NameNode exit. Contributed by Konstantin Shvachko.

2017-11-28 Thread shv
HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading 
to NameNode exit. Contributed by Konstantin Shvachko.

(cherry picked from commit d331762f24b3f22f609366740c9c4f449edc61ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3219b1bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3219b1bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3219b1bd

Branch: refs/heads/branch-2.8
Commit: 3219b1bdf6d6a8a86ed1db1491df63a4748ad6de
Parents: 2976d04
Author: Konstantin V Shvachko 
Authored: Tue Nov 28 17:14:23 2017 -0800
Committer: Konstantin V Shvachko 
Committed: Tue Nov 28 17:56:15 2017 -0800

--
 .../hdfs/server/blockmanagement/BlockManager.java |  2 --
 .../server/blockmanagement/ReplicationWork.java   | 18 ++
 2 files changed, 14 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3219b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3cc66bf..e2bdfcb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1527,8 +1527,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   // choose replication targets: NOT HOLDING THE GLOBAL LOCK
-  // It is costly to extract the filename for which chooseTargets is 
called,
-  // so for now we pass in the block collection itself.
   rw.chooseTargets(blockplacement, storagePolicySuite, excludedNodes);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3219b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 258dfdd..8362096 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -25,7 +25,8 @@ import java.util.Set;
 
 class ReplicationWork {
   private final BlockInfo block;
-  private final BlockCollection bc;
+  private final String srcPath;
+  private final byte storagePolicyID;
   private final DatanodeDescriptor srcNode;
   private final int additionalReplRequired;
   private final int priority;
@@ -38,7 +39,8 @@ class ReplicationWork {
   List<DatanodeStorageInfo> liveReplicaStorages, int 
additionalReplRequired,
   int priority) {
 this.block = block;
-this.bc = bc;
+this.srcPath = bc.getName();
+this.storagePolicyID = bc.getStoragePolicyID();
 this.srcNode = srcNode;
 this.srcNode.incrementPendingReplicationWithoutTargets();
 this.containingNodes = containingNodes;
@@ -52,10 +54,10 @@ class ReplicationWork {
   BlockStoragePolicySuite storagePolicySuite,
   Set<Node> excludedNodes)
 try {
-  targets = blockplacement.chooseTarget(bc.getName(),
+  targets = blockplacement.chooseTarget(getSrcPath(),
   additionalReplRequired, srcNode, liveReplicaStorages, false,
   excludedNodes, block.getNumBytes(),
-  storagePolicySuite.getPolicy(bc.getStoragePolicyID()), null);
+  storagePolicySuite.getPolicy(getStoragePolicyID()), null);
 } finally {
   srcNode.decrementPendingReplicationWithoutTargets();
 }
@@ -84,4 +86,12 @@ class ReplicationWork {
   public DatanodeDescriptor getSrcNode() {
 return srcNode;
   }
+
+  public String getSrcPath() {
+return srcPath;
+  }
+
+  public byte getStoragePolicyID() {
+return storagePolicyID;
+  }
 }
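
The substance of the fix is visible above: chooseTargets() runs without the
namesystem lock, and the old code held a live BlockCollection whose getName()
resolves the full path by walking INode ancestors, so a concurrent rename or
delete could fail mid-walk. Copying the path and storage policy ID into final
fields while the work item is constructed (still under the lock) removes the
race. A minimal sketch of the pattern, with hypothetical names standing in
for BlockCollection:

    // Sketch only: snapshot mutable state under the lock, read the copies later.
    class WorkItem {
      interface Source {              // hypothetical stand-in for BlockCollection
        String getName();
        byte getPolicyId();
      }

      private final String path;      // snapshot taken while the lock is held
      private final byte policyId;    // primitive copy, safe to read anywhere

      WorkItem(Source src) {          // constructor runs under the lock
        this.path = src.getName();    // walks mutable structures, so do it now
        this.policyId = src.getPolicyId();
      }

      void processWithoutLock() {     // later, lock-free: reads final fields only
        System.out.println("processing " + path + " policy=" + policyId);
      }
    }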





[6/6] hadoop git commit: HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading to NameNode exit. Contributed by Konstantin Shvachko.

2017-11-28 Thread shv
HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading 
to NameNode exit. Contributed by Konstantin Shvachko.

(cherry picked from commit d331762f24b3f22f609366740c9c4f449edc61ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f9f0055
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f9f0055
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f9f0055

Branch: refs/heads/branch-2.9
Commit: 3f9f0055eba7a3b8b9f8778f27e87b2e7be15c96
Parents: 83c4075
Author: Konstantin V Shvachko 
Authored: Tue Nov 28 17:14:23 2017 -0800
Committer: Konstantin V Shvachko 
Committed: Tue Nov 28 18:11:38 2017 -0800

--
 .../hdfs/server/blockmanagement/BlockManager.java |  2 --
 .../server/blockmanagement/ReplicationWork.java   | 18 ++
 2 files changed, 14 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f9f0055/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a49cb24..bdb926c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1592,8 +1592,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   // choose replication targets: NOT HOLDING THE GLOBAL LOCK
-  // It is costly to extract the filename for which chooseTargets is 
called,
-  // so for now we pass in the block collection itself.
   rw.chooseTargets(blockplacement, storagePolicySuite, excludedNodes);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f9f0055/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 258dfdd..8362096 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -25,7 +25,8 @@ import java.util.Set;
 
 class ReplicationWork {
   private final BlockInfo block;
-  private final BlockCollection bc;
+  private final String srcPath;
+  private final byte storagePolicyID;
   private final DatanodeDescriptor srcNode;
   private final int additionalReplRequired;
   private final int priority;
@@ -38,7 +39,8 @@ class ReplicationWork {
   List<DatanodeStorageInfo> liveReplicaStorages, int 
additionalReplRequired,
   int priority) {
 this.block = block;
-this.bc = bc;
+this.srcPath = bc.getName();
+this.storagePolicyID = bc.getStoragePolicyID();
 this.srcNode = srcNode;
 this.srcNode.incrementPendingReplicationWithoutTargets();
 this.containingNodes = containingNodes;
@@ -52,10 +54,10 @@ class ReplicationWork {
   BlockStoragePolicySuite storagePolicySuite,
   Set<Node> excludedNodes)
 try {
-  targets = blockplacement.chooseTarget(bc.getName(),
+  targets = blockplacement.chooseTarget(getSrcPath(),
   additionalReplRequired, srcNode, liveReplicaStorages, false,
   excludedNodes, block.getNumBytes(),
-  storagePolicySuite.getPolicy(bc.getStoragePolicyID()), null);
+  storagePolicySuite.getPolicy(getStoragePolicyID()), null);
 } finally {
   srcNode.decrementPendingReplicationWithoutTargets();
 }
@@ -84,4 +86,12 @@ class ReplicationWork {
   public DatanodeDescriptor getSrcNode() {
 return srcNode;
   }
+
+  public String getSrcPath() {
+return srcPath;
+  }
+
+  public byte getStoragePolicyID() {
+return storagePolicyID;
+  }
 }





[1/6] hadoop git commit: HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading to NameNode exit. Contributed by Konstantin Shvachko.

2017-11-28 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5c37a0b84 -> 9d406e5dc
  refs/heads/branch-2.7 0da13b90f -> 7252e18c3
  refs/heads/branch-2.8 2976d04de -> 3219b1bdf
  refs/heads/branch-2.9 83c40758f -> 3f9f0055e
  refs/heads/branch-3.0 a4f1e3036 -> 4cbd5ea42
  refs/heads/trunk 30941d99c -> d331762f2


HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException leading 
to NameNode exit. Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d331762f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d331762f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d331762f

Branch: refs/heads/trunk
Commit: d331762f24b3f22f609366740c9c4f449edc61ac
Parents: 30941d9
Author: Konstantin V Shvachko 
Authored: Tue Nov 28 17:14:23 2017 -0800
Committer: Konstantin V Shvachko 
Committed: Tue Nov 28 17:14:23 2017 -0800

--
 .../hdfs/server/blockmanagement/BlockManager.java |  2 --
 .../blockmanagement/BlockReconstructionWork.java  | 14 ++
 .../server/blockmanagement/ErasureCodingWork.java |  4 ++--
 .../hdfs/server/blockmanagement/ReplicationWork.java  |  4 ++--
 4 files changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d331762f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bdabd81..4986027 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1825,8 +1825,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   // choose replication targets: NOT HOLDING THE GLOBAL LOCK
-  // It is costly to extract the filename for which chooseTargets is 
called,
-  // so for now we pass in the block collection itself.
   final BlockPlacementPolicy placementPolicy =
   placementPolicies.getPolicy(rw.getBlock().getBlockType());
   rw.chooseTargets(placementPolicy, storagePolicySuite, excludedNodes);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d331762f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
index 57121bd..3f591e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
@@ -32,7 +32,8 @@ import java.util.Set;
 abstract class BlockReconstructionWork {
   private final BlockInfo block;
 
-  private final BlockCollection bc;
+  private final String srcPath;
+  private final byte storagePolicyID;
 
   /**
* An erasure coding reconstruction task has multiple source nodes.
@@ -57,7 +58,8 @@ abstract class BlockReconstructionWork {
   int additionalReplRequired,
   int priority) {
 this.block = block;
-this.bc = bc;
+this.srcPath = bc.getName();
+this.storagePolicyID = bc.getStoragePolicyID();
 this.srcNodes = srcNodes;
 this.containingNodes = containingNodes;
 this.liveReplicaStorages = liveReplicaStorages;
@@ -94,8 +96,12 @@ abstract class BlockReconstructionWork {
 return srcNodes;
   }
 
-  BlockCollection getBc() {
-return bc;
+  public String getSrcPath() {
+return srcPath;
+  }
+
+  public byte getStoragePolicyID() {
+return storagePolicyID;
   }
 
   List getLiveReplicaStorages() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d331762f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
index 0ae6f0f..a23b1d5 
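
For context on the root cause: INode.getFullPathName() measures the node's
depth first, sizes an array from that count, then walks the parent chain again
to fill the array. Outside the namesystem lock, a concurrent rename or delete
can change the chain between the two passes and push the fill index out of
bounds. A simplified, hypothetical illustration of that time-of-check/
time-of-use shape (not the actual INode code):

    // If another thread re-parents a node between the two loops, the fill
    // index can underflow and names[-1] throws ArrayIndexOutOfBoundsException.
    final class PathWalk {
      static final class Node { Node parent; String name; }

      static String fullPath(Node n) {
        int depth = 0;
        for (Node p = n; p != null; p = p.parent) {   // time of check
          depth++;
        }
        String[] names = new String[depth];
        int i = depth - 1;
        for (Node p = n; p != null; p = p.parent) {   // time of use
          names[i--] = p.name;                        // i can reach -1 here
        }
        return String.join("/", names);
      }
    }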

[42/50] [abbrv] hadoop git commit: HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when numberOfPagesRemaining is 0. Contributed by Rajesh Balamohan

2017-11-28 Thread kkaranasos
HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when 
numberOfPagesRemaining is 0.
Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ea182d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ea182d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ea182d0

Branch: refs/heads/YARN-6592
Commit: 0ea182d0faa35c726dcb37249d48786bfc8ca04c
Parents: 94bed50
Author: Steve Loughran 
Authored: Tue Nov 28 11:52:59 2017 +
Committer: Steve Loughran 
Committed: Tue Nov 28 11:52:59 2017 +

--
 .../java/org/apache/hadoop/fs/azure/PageBlobInputStream.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ea182d0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 097201b..aaac490 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -343,9 +343,9 @@ final class PageBlobInputStream extends InputStream {
 
 // Skip over whole pages as necessary without retrieving them from the
 // server.
-long pagesToSkipOver = Math.min(
+long pagesToSkipOver = Math.max(0, Math.min(
 n / PAGE_DATA_SIZE,
-numberOfPagesRemaining - 1);
+numberOfPagesRemaining - 1));
 numberOfPagesRemaining -= pagesToSkipOver;
 currentOffsetInBlob += pagesToSkipOver * PAGE_SIZE;
 skipped += pagesToSkipOver * PAGE_DATA_SIZE;
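
The arithmetic behind the one-line fix: with numberOfPagesRemaining at 0, the
old expression evaluated Math.min(n / PAGE_DATA_SIZE, -1), so pagesToSkipOver
went negative and skip() could report a negative byte count. Clamping with
Math.max(0, ...) pins it at zero. A quick standalone check (the PAGE_DATA_SIZE
value is assumed for illustration):

    public class SkipClampDemo {
      public static void main(String[] args) {
        final long PAGE_DATA_SIZE = 510;    // assumed: 512-byte page minus header
        long n = 4096;                      // bytes the caller asked to skip
        long numberOfPagesRemaining = 0;    // already at the end of the blob

        long buggy = Math.min(n / PAGE_DATA_SIZE, numberOfPagesRemaining - 1);
        long fixed = Math.max(0, Math.min(n / PAGE_DATA_SIZE,
            numberOfPagesRemaining - 1));

        System.out.println(buggy);  // -1: propagates into a negative skip()
        System.out.println(fixed);  // 0: skip() never reports negative progress
      }
    }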





[27/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints.
Contributed by Steve Loughran and Ryan Blue.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de8b6ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de8b6ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de8b6ca5

Branch: refs/heads/YARN-6592
Commit: de8b6ca5ef8614de6d6277b7617e27c788b0555c
Parents: 782ba3b
Author: Steve Loughran 
Authored: Wed Nov 22 15:28:12 2017 +
Committer: Steve Loughran 
Committed: Wed Nov 22 15:28:12 2017 +

--
 .../dev-support/findbugsExcludeFile.xml |7 +
 .../apache/hadoop/fs/FSDataOutputStream.java|9 +
 .../apache/hadoop/fs/PathExistsException.java   |4 +-
 .../org/apache/hadoop/fs/StorageStatistics.java |5 +
 .../apache/hadoop/util/JsonSerialization.java   |  299 +++
 .../src/main/resources/core-default.xml |  117 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   51 +-
 .../apache/hadoop/test/GenericTestUtils.java|   29 +-
 .../org/apache/hadoop/test/HadoopTestBase.java  |   51 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  144 +-
 .../hadoop/util/TestJsonSerialization.java  |  185 ++
 .../mapreduce/TestMapreduceConfigFields.java|   27 +-
 .../lib/output/BindingPathOutputCommitter.java  |  184 ++
 .../lib/output/FileOutputCommitter.java |   12 +-
 .../lib/output/FileOutputCommitterFactory.java  |   38 +
 .../mapreduce/lib/output/FileOutputFormat.java  |   10 +-
 .../lib/output/NamedCommitterFactory.java   |   79 +
 .../lib/output/PathOutputCommitter.java |   17 +
 .../lib/output/PathOutputCommitterFactory.java  |  204 ++
 .../src/main/resources/mapred-default.xml   |   22 +
 .../lib/output/TestPathOutputCommitter.java |   24 +-
 .../output/TestPathOutputCommitterFactory.java  |  495 +
 hadoop-tools/hadoop-aws/pom.xml |   46 +-
 .../hadoop/fs/s3a/AWSBadRequestException.java   |   42 +
 .../hadoop/fs/s3a/AWSClientIOException.java |3 +-
 .../hadoop/fs/s3a/AWSNoResponseException.java   |   31 +
 .../hadoop/fs/s3a/AWSRedirectException.java |   38 +
 .../fs/s3a/AWSServiceThrottledException.java|   42 +
 .../hadoop/fs/s3a/AWSStatus500Exception.java|   37 +
 .../s3a/BlockingThreadPoolExecutorService.java  |2 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   72 +-
 .../fs/s3a/InconsistentAmazonS3Client.java  |  232 ++-
 .../java/org/apache/hadoop/fs/s3a/Invoker.java  |  485 +
 .../java/org/apache/hadoop/fs/s3a/Listing.java  |   26 +-
 .../java/org/apache/hadoop/fs/s3a/Retries.java  |   92 +
 .../hadoop/fs/s3a/S3ABlockOutputStream.java |  307 +--
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java |2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  940 +
 .../apache/hadoop/fs/s3a/S3AInputStream.java|   56 +-
 .../hadoop/fs/s3a/S3AInstrumentation.java   |  231 ++-
 .../apache/hadoop/fs/s3a/S3ARetryPolicy.java|  246 +++
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   12 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  324 ++-
 .../org/apache/hadoop/fs/s3a/S3ListRequest.java |   11 +
 .../hadoop/fs/s3a/S3ObjectAttributes.java   |   10 +-
 .../org/apache/hadoop/fs/s3a/Statistic.java |   56 +-
 .../hadoop/fs/s3a/WriteOperationHelper.java |  474 +
 .../fs/s3a/commit/AbstractS3ACommitter.java |  756 +++
 .../s3a/commit/AbstractS3ACommitterFactory.java |   90 +
 .../hadoop/fs/s3a/commit/CommitConstants.java   |  240 +++
 .../hadoop/fs/s3a/commit/CommitOperations.java  |  596 ++
 .../hadoop/fs/s3a/commit/CommitUtils.java   |  129 ++
 .../hadoop/fs/s3a/commit/CommitUtilsWithMR.java |  192 ++
 .../apache/hadoop/fs/s3a/commit/Duration.java   |   60 +
 .../hadoop/fs/s3a/commit/DurationInfo.java  |   59 +
 .../s3a/commit/InternalCommitterConstants.java  |  100 +
 .../hadoop/fs/s3a/commit/LocalTempDir.java  |   80 +
 .../fs/s3a/commit/MagicCommitIntegration.java   |  182 ++
 .../hadoop/fs/s3a/commit/MagicCommitPaths.java  |  229 ++
 .../fs/s3a/commit/PathCommitException.java  |   43 +
 .../apache/hadoop/fs/s3a/commit/PutTracker.java |  100 +
 .../fs/s3a/commit/S3ACommitterFactory.java  |  129 ++
 .../org/apache/hadoop/fs/s3a/commit/Tasks.java  |  410 
 .../hadoop/fs/s3a/commit/ValidationFailure.java |   53 +
 .../hadoop/fs/s3a/commit/files/PendingSet.java  |  192 ++
 .../s3a/commit/files/PersistentCommitData.java  |   69 +
 .../s3a/commit/files/SinglePendingCommit.java   |  432 
 .../hadoop/fs/s3a/commit/files/SuccessData.java |  322 +++
 .../fs/s3a/commit/files/package-info.java   |   45 +
 .../fs/s3a/commit/magic/MagicCommitTracker.java |  161 ++
 .../s3a/commit/magic/MagicS3GuardCommitter.java |  288 +++
 .../magic/MagicS3GuardCommitterFactory.java |   47 +
 

[43/50] [abbrv] hadoop git commit: YARN-7499. Layout changes to Application details page in new YARN UI. Contributed by Vasudevan Skm.

2017-11-28 Thread kkaranasos
YARN-7499. Layout changes to Application details page in new YARN UI. 
Contributed by Vasudevan Skm.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/641ba5c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/641ba5c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/641ba5c7

Branch: refs/heads/YARN-6592
Commit: 641ba5c7a1471f8d799b1f919cd41daffb9da84e
Parents: 0ea182d
Author: Sunil G 
Authored: Tue Nov 28 18:37:11 2017 +0530
Committer: Sunil G 
Committed: Tue Nov 28 18:37:11 2017 +0530

--
 .../webapp/app/controllers/app-table-columns.js |   4 +-
 .../src/main/webapp/app/controllers/yarn-app.js |  69 -
 .../webapp/app/controllers/yarn-flowrun/info.js |   2 +-
 .../src/main/webapp/app/models/yarn-app.js  |   6 +-
 .../src/main/webapp/app/router.js   |  12 +-
 .../src/main/webapp/app/routes/yarn-app.js  |  23 +-
 .../main/webapp/app/routes/yarn-app/attempts.js |  15 +-
 .../main/webapp/app/routes/yarn-app/charts.js   |  18 +-
 .../webapp/app/routes/yarn-app/components.js|  16 +-
 .../main/webapp/app/routes/yarn-app/configs.js  |  16 +-
 .../src/main/webapp/app/routes/yarn-app/info.js |  17 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   2 +-
 .../src/main/webapp/app/styles/app.scss |  24 ++
 .../src/main/webapp/app/styles/colors.scss  |   2 +
 .../src/main/webapp/app/styles/layout.scss  |  42 +++
 .../src/main/webapp/app/styles/variables.scss   |   4 +
 .../src/main/webapp/app/styles/yarn-app.scss|  35 +++
 .../app/templates/components/timeline-view.hbs  |   2 +-
 .../src/main/webapp/app/templates/yarn-app.hbs  | 149 +++---
 .../webapp/app/templates/yarn-app/attempts.hbs  |   2 +-
 .../webapp/app/templates/yarn-app/charts.hbs|  46 ++-
 .../app/templates/yarn-app/components.hbs   |   6 +-
 .../webapp/app/templates/yarn-app/configs.hbs   |  58 ++--
 .../main/webapp/app/templates/yarn-app/info.hbs | 281 +--
 .../webapp/app/templates/yarn-app/loading.hbs   |   2 +-
 .../main/webapp/app/templates/yarn-services.hbs |   2 +-
 26 files changed, 518 insertions(+), 337 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/641ba5c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index 05bfad45..a87acc1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -39,7 +39,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.id,
-  href: `#/yarn-app/${row.id}/info`
+  href: `#/yarn-app/${row.id}/attempts`
 };
   }
   }, {
@@ -120,7 +120,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('appName'),
-  href: `#/yarn-app/${row.id}/info?service=${row.get('appName')}`
+  href: `#/yarn-app/${row.id}/attempts?service=${row.get('appName')}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/641ba5c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index c40697f..b84f328 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -32,6 +32,65 @@ export default Ember.Controller.extend({
 text: 'App'
   }],
 
+  actions: {
+showStopServiceConfirm() {
+  this.set('actionResponse', null);
+  Ember.$("#stopServiceConfirmDialog").modal('show');
+},
+
+stopService() {
+  var self = this;
+  Ember.$("#stopServiceConfirmDialog").modal('hide');
+  var adapter = this.store.adapterFor('yarn-servicedef');
+  self.set('isLoading', true);
+  adapter.stopService(this.model.serviceName).then(function () {
+self.set('actionResponse', { msg: 'Service stopped successfully. Auto 
refreshing in 5 seconds.', type: 'success' });
+ 

[09/50] [abbrv] hadoop git commit: HDFS-12813. RequestHedgingProxyProvider can hide Exception thrown from the Namenode for proxy size of 1. Contributed by Mukul Kumar Singh

2017-11-28 Thread kkaranasos
HDFS-12813.  RequestHedgingProxyProvider can hide Exception thrown from the 
Namenode for proxy size of 1.  Contributed by Mukul Kumar Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/659e85e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/659e85e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/659e85e3

Branch: refs/heads/YARN-6592
Commit: 659e85e304d070f9908a96cf6a0e1cbafde6a434
Parents: 60fc2a1
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Nov 20 17:09:19 2017 -0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Nov 20 17:09:19 2017 -0800

--
 .../ha/RequestHedgingProxyProvider.java | 81 ++--
 .../ha/TestRequestHedgingProxyProvider.java | 58 ++
 2 files changed, 114 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/659e85e3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index b94e94d..08edfe2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.net.URI;
@@ -29,6 +30,7 @@ import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RemoteException;
@@ -87,9 +89,19 @@ public class RequestHedgingProxyProvider extends
 targetProxies.remove(toIgnore);
 if (targetProxies.size() == 1) {
  ProxyInfo<T> proxyInfo = targetProxies.values().iterator().next();
-  Object retVal = method.invoke(proxyInfo.proxy, args);
-  successfulProxy = proxyInfo;
-  return retVal;
+  try {
+currentUsedProxy = proxyInfo;
+Object retVal = method.invoke(proxyInfo.proxy, args);
+LOG.debug("Invocation successful on [{}]",
+currentUsedProxy.proxyInfo);
+return retVal;
+  } catch (InvocationTargetException ex) {
+Exception unwrappedException = unwrapInvocationTargetException(ex);
+logProxyException(unwrappedException, currentUsedProxy.proxyInfo);
+LOG.trace("Unsuccessful invocation on [{}]",
+currentUsedProxy.proxyInfo);
+throw unwrappedException;
+  }
 }
 executor = Executors.newFixedThreadPool(proxies.size());
 completionService = new ExecutorCompletionService<>(executor);
@@ -112,15 +124,16 @@ public class RequestHedgingProxyProvider extends
  Future<Object> callResultFuture = completionService.take();
   Object retVal;
   try {
+currentUsedProxy = proxyMap.get(callResultFuture);
 retVal = callResultFuture.get();
-successfulProxy = proxyMap.get(callResultFuture);
 LOG.debug("Invocation successful on [{}]",
-successfulProxy.proxyInfo);
+currentUsedProxy.proxyInfo);
 return retVal;
-  } catch (Exception ex) {
+  } catch (ExecutionException ex) {
+Exception unwrappedException = unwrapExecutionException(ex);
ProxyInfo<T> tProxyInfo = proxyMap.get(callResultFuture);
-logProxyException(ex, tProxyInfo.proxyInfo);
-badResults.put(tProxyInfo.proxyInfo, unwrapException(ex));
+logProxyException(unwrappedException, tProxyInfo.proxyInfo);
+badResults.put(tProxyInfo.proxyInfo, unwrappedException);
 LOG.trace("Unsuccessful invocation on [{}]", tProxyInfo.proxyInfo);
 numAttempts--;
   }
@@ -143,7 +156,7 @@ public class RequestHedgingProxyProvider extends
   }
 
 
-  private volatile ProxyInfo<T> successfulProxy = null;
+  private volatile ProxyInfo<T> currentUsedProxy = null;
   private volatile String toIgnore = null;
 
   public 
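
The shape of this change: method.invoke() wraps whatever the target throws in
InvocationTargetException, and Future.get() wraps it again in
ExecutionException, so catching broadly and reporting the wrapper hid the
NameNode's real exception whenever only one proxy was in play. The patch
catches the specific wrapper types and rethrows the unwrapped cause. A minimal
sketch of the reflective half (the helper is illustrative, not the class's
actual private method):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    final class UnwrapDemo {
      static Object invokeUnwrapped(Method method, Object target, Object... args)
          throws Exception {
        try {
          return method.invoke(target, args);
        } catch (InvocationTargetException e) {
          // Surface what the target actually threw, not the reflection wrapper.
          Throwable cause = e.getCause();
          throw (cause instanceof Exception) ? (Exception) cause : e;
        }
      }
    }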

[34/50] [abbrv] hadoop git commit: YARN-6483. Add nodes transitioning to DECOMMISSIONING state to the list of updated nodes returned to the AM. (Juan Rodriguez Hortala via asuresh)

2017-11-28 Thread kkaranasos
YARN-6483. Add nodes transitioning to DECOMMISSIONING state to the list of 
updated nodes returned to the AM. (Juan Rodriguez Hortala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b46ca7e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b46ca7e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b46ca7e7

Branch: refs/heads/YARN-6592
Commit: b46ca7e73b8bac3fdbff0b13afe009308078acf2
Parents: aab4395
Author: Arun Suresh 
Authored: Wed Nov 22 19:16:44 2017 -0800
Committer: Arun Suresh 
Committed: Wed Nov 22 19:18:30 2017 -0800

--
 .../hadoop/yarn/api/records/NodeReport.java |  47 ++--
 .../hadoop/yarn/api/records/NodeUpdateType.java |  29 +
 .../src/main/proto/yarn_protos.proto|   8 ++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  14 +--
 .../hadoop/yarn/client/cli/TestYarnCLI.java |   2 +-
 .../api/records/impl/pb/NodeReportPBImpl.java   |  50 +++-
 .../yarn/api/records/impl/pb/ProtoUtils.java|  12 ++
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  14 ++-
 .../server/resourcemanager/ClientRMService.java |   5 +-
 .../DecommissioningNodesWatcher.java|  38 +-
 .../resourcemanager/DefaultAMSProcessor.java|  12 +-
 .../resourcemanager/NodesListManager.java   |  78 +
 .../NodesListManagerEventType.java  |   3 +-
 .../server/resourcemanager/rmapp/RMApp.java |  10 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  11 +-
 .../rmapp/RMAppNodeUpdateEvent.java |   9 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   2 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  |   5 +
 .../yarn/server/resourcemanager/MockRM.java |  15 +++
 .../resourcemanager/TestClientRMService.java|  50 
 .../TestDecommissioningNodesWatcher.java|   4 +-
 .../resourcemanager/TestRMNodeTransitions.java  |  13 ++-
 .../TestResourceTrackerService.java | 116 ++-
 .../applicationsmanager/MockAsm.java|   4 +-
 .../TestAMRMRPCNodeUpdates.java |  51 
 .../server/resourcemanager/rmapp/MockRMApp.java |   4 +-
 26 files changed, 495 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46ca7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
index 885a3b4..3a80641 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
@@ -53,7 +53,8 @@ public abstract class NodeReport {
   String httpAddress, String rackName, Resource used, Resource capability,
   int numContainers, String healthReport, long lastHealthReportTime) {
 return newInstance(nodeId, nodeState, httpAddress, rackName, used,
-capability, numContainers, healthReport, lastHealthReportTime, null);
+capability, numContainers, healthReport, lastHealthReportTime,
+null, null, null);
   }
 
   @Private
@@ -61,7 +62,8 @@ public abstract class NodeReport {
   public static NodeReport newInstance(NodeId nodeId, NodeState nodeState,
   String httpAddress, String rackName, Resource used, Resource capability,
   int numContainers, String healthReport, long lastHealthReportTime,
-  Set<String> nodeLabels) {
+  Set<String> nodeLabels, Integer decommissioningTimeout,
+  NodeUpdateType nodeUpdateType) {
 NodeReport nodeReport = Records.newRecord(NodeReport.class);
 nodeReport.setNodeId(nodeId);
 nodeReport.setNodeState(nodeState);
@@ -73,6 +75,8 @@ public abstract class NodeReport {
 nodeReport.setHealthReport(healthReport);
 nodeReport.setLastHealthReportTime(lastHealthReportTime);
 nodeReport.setNodeLabels(nodeLabels);
+nodeReport.setDecommissioningTimeout(decommissioningTimeout);
+nodeReport.setNodeUpdateType(nodeUpdateType);
 return nodeReport;
   }
 
@@ -186,8 +190,8 @@ public abstract class NodeReport {
   public abstract void setLastHealthReportTime(long lastHealthReport);
   
   /**
-   * Get labels of this node
-   * @return labels of this node
+   * Get labels of this node.
+   * @return labels of this node.
*/
   @Public
   @Stable
@@ -198,8 +202,8 @@ public abstract class NodeReport {
  public abstract void setNodeLabels(Set<String> nodeLabels);
 
   /**
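
On the consuming side, an ApplicationMaster can now spot draining nodes in the
updated-node list of its allocate response and move work before containers are
lost. A hedged sketch of that handling (the getters follow this patch; the
reaction itself is invented):

    import java.util.List;
    import org.apache.hadoop.yarn.api.records.NodeReport;
    import org.apache.hadoop.yarn.api.records.NodeState;

    final class DecommissionAwareAM {
      static void onUpdatedNodes(List<NodeReport> updatedNodes) {
        for (NodeReport report : updatedNodes) {
          if (report.getNodeState() == NodeState.DECOMMISSIONING) {
            Integer timeout = report.getDecommissioningTimeout();  // may be null
            // Invented reaction: drain work off the node before the timeout.
            System.out.println("Node " + report.getNodeId()
                + " is decommissioning, timeout=" + timeout);
          }
        }
      }
    }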

[24/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index da1fc5a..ef5a434 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -24,7 +24,12 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricStringBuilder;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -122,8 +127,23 @@ public class S3AInstrumentation {
   STREAM_WRITE_BLOCK_UPLOADS_ABORTED,
   STREAM_WRITE_TOTAL_TIME,
   STREAM_WRITE_TOTAL_DATA,
+  COMMITTER_COMMITS_CREATED,
+  COMMITTER_COMMITS_COMPLETED,
+  COMMITTER_JOBS_SUCCEEDED,
+  COMMITTER_JOBS_FAILED,
+  COMMITTER_TASKS_SUCCEEDED,
+  COMMITTER_TASKS_FAILED,
+  COMMITTER_BYTES_COMMITTED,
+  COMMITTER_BYTES_UPLOADED,
+  COMMITTER_COMMITS_FAILED,
+  COMMITTER_COMMITS_ABORTED,
+  COMMITTER_COMMITS_REVERTED,
+  COMMITTER_MAGIC_FILES_CREATED,
   S3GUARD_METADATASTORE_PUT_PATH_REQUEST,
-  S3GUARD_METADATASTORE_INITIALIZATION
+  S3GUARD_METADATASTORE_INITIALIZATION,
+  S3GUARD_METADATASTORE_RETRY,
+  S3GUARD_METADATASTORE_THROTTLED,
+  STORE_IO_THROTTLED
   };
 
 
@@ -179,8 +199,11 @@ public class S3AInstrumentation {
   gauge(statistic.getSymbol(), statistic.getDescription());
 }
 //todo need a config for the quantiles interval?
+int interval = 1;
 quantiles(S3GUARD_METADATASTORE_PUT_PATH_LATENCY,
-"ops", "latency", 1);
+"ops", "latency", interval);
+quantiles(S3GUARD_METADATASTORE_THROTTLE_RATE,
+"events", "frequency (Hz)", interval);
   }
 
   /**
@@ -372,7 +395,7 @@ public class S3AInstrumentation {
   }
 
   /**
-   * Indicate that S3A deleted one or more file.s
+   * Indicate that S3A deleted one or more files.
* @param count number of files.
*/
   public void fileDeleted(int count) {
@@ -506,6 +529,14 @@ public class S3AInstrumentation {
   }
 
   /**
+   * Create a new instance of the committer statistics.
+   * @return a new committer statistics instance
+   */
+  CommitterStatistics newCommitterStatistics() {
+return new CommitterStatistics();
+  }
+
+  /**
* Merge in the statistics of a single input stream into
* the filesystem-wide statistics.
* @param statistics stream statistics
@@ -584,9 +615,12 @@ public class S3AInstrumentation {
 
 /**
  * The inner stream was opened.
+ * @return the previous count
  */
-public void streamOpened() {
+public long streamOpened() {
+  long count = openOperations;
   openOperations++;
+  return count;
 }
 
 /**
@@ -810,10 +844,13 @@ public class S3AInstrumentation {
 }
 
 /**
- * Note an exception in a multipart complete.
+ * Note exception in a multipart complete.
+ * @param count count of exceptions
  */
-void exceptionInMultipartComplete() {
-  exceptionsInMultipartFinalize.incrementAndGet();
+void exceptionInMultipartComplete(int count) {
+  if (count > 0) {
+exceptionsInMultipartFinalize.addAndGet(count);
+  }
 }
 
 /**
@@ -832,6 +869,15 @@ public class S3AInstrumentation {
 }
 
 /**
+ * Data has been uploaded to be committed in a subsequent operation;
+ * to be called at the end of the write.
+ * @param size size in bytes
+ */
+public void commitUploaded(long size) {
+  incrementCounter(COMMITTER_BYTES_UPLOADED, size);
+}
+
+/**
  * Output stream has closed.
  * Trigger merge in of all statistics not updated during operation.
  */
@@ -918,5 +964,176 @@ public class S3AInstrumentation {
 public void storeClosed() {
 
 }
+
+/**
+ * Throttled request.
+ */
+public void throttled() {
+  incrementCounter(S3GUARD_METADATASTORE_THROTTLED, 1);
+  addValueToQuantiles(S3GUARD_METADATASTORE_THROTTLE_RATE, 1);
+}
+
+/**
+ * S3Guard is retrying after a (retryable) failure.
+ */
+public void retrying() {
+  

[04/50] [abbrv] hadoop git commit: YARN-7529. TestYarnNativeServices#testRecoverComponentsAfterRMRestart() fails intermittently. Contributed by Chandni Singh

2017-11-28 Thread kkaranasos
YARN-7529. TestYarnNativeServices#testRecoverComponentsAfterRMRestart() fails 
intermittently. Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f9d7a14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f9d7a14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f9d7a14

Branch: refs/heads/YARN-6592
Commit: 6f9d7a146d5940a9e8a7913c19b43b265d6bfa32
Parents: 6903cf0
Author: Billie Rinaldi 
Authored: Mon Nov 20 07:37:04 2017 -0800
Committer: Billie Rinaldi 
Committed: Mon Nov 20 07:37:04 2017 -0800

--
 .../yarn/service/TestYarnNativeServices.java| 42 +---
 1 file changed, 18 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f9d7a14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index f98d90a..1c517d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -176,7 +176,8 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 ServiceClient client = createClient();
 Service exampleApp = createExampleApplication();
 client.actionCreate(exampleApp);
-waitForAllCompToBeReady(client, exampleApp);
+Multimap<String, ContainerId> containersBeforeFailure =
+waitForAllCompToBeReady(client, exampleApp);
 
 LOG.info("Restart the resource manager");
 getYarnCluster().restartResourceManager(
@@ -191,9 +192,6 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 ApplicationAttemptId applicationAttemptId = client.getYarnClient()
 .getApplicationReport(exampleAppId).getCurrentApplicationAttemptId();
 
-Multimap<String, ContainerId> containersBeforeFailure = getContainersForAllComp(
-client, exampleApp);
-
 LOG.info("Fail the application attempt {}", applicationAttemptId);
 client.getYarnClient().failApplicationAttempt(applicationAttemptId);
 //wait until attempt 2 is running
@@ -208,7 +206,7 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
   }
 }, 2000, 20);
 
-Multimap containersAfterFailure = getContainersForAllComp(
+Multimap containersAfterFailure = waitForAllCompToBeReady(
 client, exampleApp);
 Assert.assertEquals("component container affected by restart",
 containersBeforeFailure, containersAfterFailure);
@@ -318,14 +316,26 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 }, 2000, 20);
   }
 
-  // wait until all the containers for all components become ready state
-  private void waitForAllCompToBeReady(ServiceClient client,
+  /**
+   * Wait until all the containers for all components become ready state.
+   *
+   * @param client
+   * @param exampleApp
+   * @return all ready containers of a service.
+   * @throws TimeoutException
+   * @throws InterruptedException
+   */
+  private Multimap<String, ContainerId> waitForAllCompToBeReady(ServiceClient 
client,
   Service exampleApp) throws TimeoutException, InterruptedException {
 int expectedTotalContainers = countTotalContainers(exampleApp);
+
+Multimap<String, ContainerId> allContainers = HashMultimap.create();
+
 GenericTestUtils.waitFor(() -> {
   try {
 Service retrievedApp = client.getStatus(exampleApp.getName());
 int totalReadyContainers = 0;
+allContainers.clear();
 LOG.info("Num Components " + retrievedApp.getComponents().size());
 for (Component component : retrievedApp.getComponents()) {
   LOG.info("looking for  " + component.getName());
@@ -339,6 +349,7 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 + component.getName());
 if (container.getState() == ContainerState.READY) {
   totalReadyContainers++;
+  allContainers.put(component.getName(), container.getId());
   

[26/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
new file mode 100644
index 000..13e1c61
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.lib.output;
+
+import java.io.IOException;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+
+import static 
org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory.*;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+/**
+ * Test the committer factory logic, looking at the override
+ * and fallback behavior.
+ */
+@SuppressWarnings("unchecked")
+public class TestPathOutputCommitterFactory extends Assert {
+
+  private static final String HTTP_COMMITTER_FACTORY = String.format(
+  COMMITTER_FACTORY_SCHEME_PATTERN, "http");
+
+  private static final Path HTTP_PATH = new Path("http://hadoop.apache.org/");
+  private static final Path HDFS_PATH = new Path("hdfs://localhost:8081/");
+
+  private TaskAttemptID taskAttemptID =
+  new TaskAttemptID("local", 0, TaskType.MAP, 1, 2);
+
+  /**
+   * Set a factory for a schema, verify it works.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testCommitterFactoryForSchema() throws Throwable {
+createCommitterFactory(SimpleCommitterFactory.class,
+HTTP_PATH,
+newBondedConfiguration());
+  }
+
+  /**
+   * A schema factory only affects that filesystem.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testCommitterFactoryFallbackDefault() throws Throwable {
+createCommitterFactory(FileOutputCommitterFactory.class,
+HDFS_PATH,
+newBondedConfiguration());
+  }
+
+  /**
+   * A schema factory only affects that filesystem; test through
+   * {@link PathOutputCommitterFactory#createCommitter(Path, 
TaskAttemptContext)}.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testCommitterFallbackDefault() throws Throwable {
+createCommitter(FileOutputCommitter.class,
+HDFS_PATH,
+taskAttempt(newBondedConfiguration()));
+  }
+
+  /**
+   * Verify that you can override any schema with an explicit name.
+   */
+  @Test
+  public void testCommitterFactoryOverride() throws Throwable {
+Configuration conf = newBondedConfiguration();
+// set up for the schema factory
+// and then set a global one which overrides the others.
+conf.set(COMMITTER_FACTORY_CLASS, OtherFactory.class.getName());
+createCommitterFactory(OtherFactory.class, HDFS_PATH, conf);
+createCommitterFactory(OtherFactory.class, HTTP_PATH, conf);
+  }
+
+  /**
+   * Verify that if the factory class option is "", schema factory
+   * resolution still works.
+   */
+  @Test
+  public void testCommitterFactoryEmptyOption() throws Throwable {
+Configuration conf = newBondedConfiguration();
+// set up for the schema factory
+// and then set a global one which overrides the others.
+conf.set(COMMITTER_FACTORY_CLASS, "");
+createCommitterFactory(SimpleCommitterFactory.class, HTTP_PATH, conf);
+
+// and HDFS, with no schema, falls back to the default
+

[41/50] [abbrv] hadoop git commit: HDFS-12858. Add router admin commands usage in HDFS commands reference doc. Contributed by Yiqun Lin.

2017-11-28 Thread kkaranasos
HDFS-12858. Add router admin commands usage in HDFS commands reference doc. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94bed504
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94bed504
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94bed504

Branch: refs/heads/YARN-6592
Commit: 94bed5047113fb148194380853ff01e92897a91f
Parents: d8923cd
Author: Yiqun Lin 
Authored: Tue Nov 28 11:48:55 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Nov 28 11:48:55 2017 +0800

--
 .../src/site/markdown/HDFSCommands.md   | 23 
 1 file changed, 23 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94bed504/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index c5f80d0..d8462c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -414,6 +414,29 @@ Usage:
 
 Runs a HDFS dfsadmin client.
 
+### `dfsrouter`
+
+Usage: `hdfs dfsrouter`
+
+Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more 
info.
+
+### `dfsrouteradmin`
+
+Usage:
+
+  hdfs dfsrouteradmin
+  [-add <source> <nameservice> <destination>]
+  [-rm <source>]
+  [-ls <path>]
+
+| COMMAND\_OPTION | Description |
+|: |: |
+| `-add` *source* *nameservice* *destination* | Add a mount table entry or 
update if it exists. |
+| `-rm` *source* | Remove mount point of specified path. |
+| `-ls` *path* | List mount points under specified path. |
+
+The commands for managing Router-based federation. See [Mount table 
management](./HDFSRouterFederation.html#Mount_table_management) for more info.
+
 ### `diskbalancer`
 
 Usage:


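A hedged end-to-end example of the new commands (the mount path and
nameservice name are made up):

    hdfs dfsrouteradmin -add /data ns1 /data
    hdfs dfsrouteradmin -ls /
    hdfs dfsrouteradmin -rm /data

This mounts /data from nameservice ns1 into the federated namespace, lists the
mount points under the root, then removes the entry again.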



[28/50] [abbrv] hadoop git commit: YARN-5534. Allow user provided Docker volume mount list. (Contributed by Shane Kumpf)

2017-11-28 Thread kkaranasos
YARN-5534.  Allow user provided Docker volume mount list.  (Contributed by 
Shane Kumpf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d42a336c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d42a336c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d42a336c

Branch: refs/heads/YARN-6592
Commit: d42a336cfab106d052aa30d80d9d30904123cb55
Parents: de8b6ca
Author: Eric Yang 
Authored: Wed Nov 22 13:05:34 2017 -0500
Committer: Eric Yang 
Committed: Wed Nov 22 13:05:34 2017 -0500

--
 .../runtime/DockerLinuxContainerRuntime.java|  42 +++
 .../linux/runtime/docker/DockerRunCommand.java  |  12 ++
 .../runtime/TestDockerContainerRuntime.java | 109 +++
 .../src/site/markdown/DockerContainers.md   |  48 
 4 files changed, 211 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d42a336c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 75a28e6..e61dc23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -65,6 +65,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
@@ -134,6 +135,16 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * source is an absolute path that is not a symlink and that points to a
  * localized resource.
  *   
+ *   
+ * {@code YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS} allows users to specify
+ * additional volume mounts for the Docker container. The value of the
+ * environment variable should be a comma-separated list of mounts.
+ * All such mounts must be given as {@code source:dest:mode}, and the mode
+ * must be "ro" (read-only) or "rw" (read-write) to specify the type of
+ * access being requested. The requested mounts will be validated by
+ * container-executor based on the values set in container-executor.cfg for
+ * {@code docker.allowed.ro-mounts} and {@code docker.allowed.rw-mounts}.
+ *   
  * 
  */
 @InterfaceAudience.Private
@@ -151,6 +162,8 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$";
   private static final Pattern hostnamePattern = Pattern.compile(
   HOSTNAME_PATTERN);
+  private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
+  "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
 
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
@@ -176,6 +189,9 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS =
   "YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS";
+  @InterfaceAudience.Private
+  public static final String ENV_DOCKER_CONTAINER_MOUNTS =
+  "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
 
   private Configuration conf;
   private Context nmContext;
@@ -675,6 +691,32 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   }
 }
 
+if (environment.containsKey(ENV_DOCKER_CONTAINER_MOUNTS)) {
+  Matcher parsedMounts = USER_MOUNT_PATTERN.matcher(
+  environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
+  if (!parsedMounts.find()) {
+throw new ContainerExecutionException(
+"Unable to parse user supplied mount list: "
++ environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
+  }
+  parsedMounts.reset();
+  while (parsedMounts.find()) {
+String src = parsedMounts.group(1);
+String dst = 
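
To make the accepted format concrete, here is a standalone check of how a
user-supplied mount list parses under the pattern above (the regex is copied
from the patch; the sample mounts are made up):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class MountListDemo {
      private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
          "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");

      public static void main(String[] args) {
        // A value a user might export as YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS.
        String mounts = "/etc/passwd:/etc/passwd:ro,/tmp/scratch:/scratch:rw";
        Matcher m = USER_MOUNT_PATTERN.matcher(mounts);
        while (m.find()) {
          System.out.printf("src=%s dst=%s mode=%s%n",
              m.group(1), m.group(2), m.group(3));
        }
      }
    }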

[36/50] [abbrv] hadoop git commit: YARN-7509. AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby. (Tao Yang via wangda)

2017-11-28 Thread kkaranasos
YARN-7509. AsyncScheduleThread and ResourceCommitterService are still running 
after RM is transitioned to standby. (Tao Yang via wangda)

Change-Id: I7477fe355419fd4a0a6e2bdda7319abad4c4c748


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/834e91ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/834e91ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/834e91ee

Branch: refs/heads/YARN-6592
Commit: 834e91ee91d22d74866afbf6252107e969bf8370
Parents: d162252
Author: Wangda Tan 
Authored: Thu Nov 23 19:59:03 2017 -0800
Committer: Wangda Tan 
Committed: Thu Nov 23 19:59:03 2017 -0800

--
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../TestRMHAForAsyncScheduler.java  | 155 +++
 2 files changed, 164 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e91ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index ed30ad1..218adf3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -459,7 +459,7 @@ public class CapacityScheduler extends
* Schedule on all nodes by starting at a random point.
* @param cs
*/
-  static void schedule(CapacityScheduler cs) {
+  static void schedule(CapacityScheduler cs) throws InterruptedException {
 // First randomize the start point
 int current = 0;
 Collection<FiCaSchedulerNode> nodes = cs.nodeTracker.getAllNodes();
@@ -475,9 +475,7 @@ public class CapacityScheduler extends
   cs.allocateContainersToNode(node.getNodeID(), false);
 }
 
-try {
-  Thread.sleep(cs.getAsyncScheduleInterval());
-} catch (InterruptedException e) {}
+Thread.sleep(cs.getAsyncScheduleInterval());
   }
 
   static class AsyncScheduleThread extends Thread {
@@ -492,9 +490,9 @@ public class CapacityScheduler extends
 
 @Override
 public void run() {
-  while (true) {
+  while (!Thread.currentThread().isInterrupted()) {
 try {
-  if (!runSchedules.get() || Thread.currentThread().isInterrupted()) {
+  if (!runSchedules.get()) {
 Thread.sleep(100);
   } else {
 // Don't run schedule if we have some pending backlogs already
@@ -505,9 +503,11 @@ public class CapacityScheduler extends
 }
   }
 } catch (InterruptedException ie) {
-  // Do nothing
+  // keep interrupt signal
+  Thread.currentThread().interrupt();
 }
   }
+  LOG.info("AsyncScheduleThread[" + getName() + "] exited!");
 }
 
 public void beginSchedule() {
@@ -546,8 +546,10 @@ public class CapacityScheduler extends
 
 } catch (InterruptedException e) {
   LOG.error(e);
+  Thread.currentThread().interrupt();
 }
   }
+  LOG.info("ResourceCommitterService exited!");
 }
 
 public void addNewCommitRequest(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e91ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
new file mode 100644
index 000..46d5cda
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software 

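The change above is an instance of the standard interrupt-preservation
idiom: the worker loop exits once its thread has been interrupted, and a
caught InterruptedException is re-asserted rather than swallowed. A generic
sketch of the idiom (not the CapacityScheduler code itself):

public class Worker extends Thread {
  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        doOneUnitOfWork();
        Thread.sleep(100);  // throws if we are interrupted while asleep
      } catch (InterruptedException ie) {
        // Re-assert the flag so the while condition sees it and the loop ends.
        Thread.currentThread().interrupt();
      }
    }
    System.out.println(getName() + " exited cleanly");
  }

  private void doOneUnitOfWork() { /* ... */ }
}

Swallowing the exception, as the old code did, leaves the thread running
forever after the RM transitions to standby, which is exactly the bug fixed
here.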
[49/50] [abbrv] hadoop git commit: YARN-6595. [API] Add Placement Constraints at the application level. (Arun Suresh via kkaranasos)

2017-11-28 Thread kkaranasos
YARN-6595. [API] Add Placement Constraints at the application level. (Arun 
Suresh via kkaranasos)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a0abf39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a0abf39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a0abf39

Branch: refs/heads/YARN-6592
Commit: 6a0abf39ff3b1db9ea686324f84e43f9ae598ae2
Parents: 3571634
Author: Konstantinos Karanasos 
Authored: Mon Nov 13 15:25:24 2017 -0800
Committer: Konstantinos Karanasos 
Committed: Tue Nov 28 13:46:29 2017 -0800

--
 .../RegisterApplicationMasterRequest.java   |  42 -
 .../yarn/api/resource/PlacementConstraint.java  | 156 +++
 .../src/main/proto/yarn_protos.proto|   6 +
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../RegisterApplicationMasterRequestPBImpl.java | 106 -
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  11 ++
 6 files changed, 313 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a0abf39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
index 395e190..f2d537a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.yarn.api.protocolrecords;
 
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.util.Records;
-
 /**
  * The request sent by the {@code ApplicationMaster} to {@code ResourceManager}
  * on registration.
@@ -132,4 +137,39 @@ public abstract class RegisterApplicationMasterRequest {
   @Public
   @Stable
   public abstract void setTrackingUrl(String trackingUrl);
+
+  /**
+   * Return all Placement Constraints specified at the Application level. The
+   * mapping is from a set of allocation tags to a
+   * PlacementConstraint associated with the tags, i.e., each
+   * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest} that has 
those
+   * tags will be placed taking into account the corresponding constraint.
+   *
+   * @return A map of Placement Constraints.
+   */
+  @Public
+  @Unstable
+  public Map<Set<String>, PlacementConstraint> getPlacementConstraints() {
+    return new HashMap<>();
+  }
+
+  /**
+   * Set Placement Constraints applicable to the
+   * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s
+   * of this application.
+   * The mapping is from a set of allocation tags to a
+   * PlacementConstraint associated with the tags.
+   * For example:
+   *  Map 
+   *   hb_regionserver -> node_anti_affinity,
+   *   hb_regionserver, hb_master -> rack_affinity,
+   *   ...
+   *  
+   * @param placementConstraints Placement Constraint Mapping.
+   */
+  @Public
+  @Unstable
+  public void setPlacementConstraints(
+      Map<Set<String>, PlacementConstraint> placementConstraints) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a0abf39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index f0e3982..b6e851a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -54,6 +54,26 @@ public class PlacementConstraint {
 return constraintExpr;
   }
 
+  

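A sketch of how an application master might populate this mapping when
registering. The tag name is taken from the javadoc example above; the
antiAffinity() helper is a hypothetical placeholder, since constructing the
constraint itself belongs to the PlacementConstraint API introduced
elsewhere in this series.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;

public class RegisterWithConstraints {
  // Hypothetical placeholder for building the actual constraint.
  static PlacementConstraint antiAffinity() {
    throw new UnsupportedOperationException("illustrative placeholder");
  }

  static RegisterApplicationMasterRequest buildRequest() {
    RegisterApplicationMasterRequest req = RegisterApplicationMasterRequest
        .newInstance("am-host", 8080, "http://am-host:8080/tracking");
    Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
    // SchedulingRequests tagged "hb_regionserver" will be placed
    // according to this constraint, mirroring the setter's javadoc.
    constraints.put(Collections.singleton("hb_regionserver"), antiAffinity());
    req.setPlacementConstraints(constraints);
    return req;
  }
}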
[37/50] [abbrv] hadoop git commit: YARN-7290. Method canContainerBePreempted can return true when it shouldn't. (Contributed by Steven Rand)

2017-11-28 Thread kkaranasos
YARN-7290. Method canContainerBePreempted can return true when it shouldn't. 
(Contributed by Steven Rand)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bde3aed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bde3aed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bde3aed

Branch: refs/heads/YARN-6592
Commit: 2bde3aedf139368fc71f053d8dd6580b498ff46d
Parents: 834e91e
Author: Yufei Gu 
Authored: Fri Nov 24 23:32:46 2017 -0800
Committer: Yufei Gu 
Committed: Fri Nov 24 23:32:46 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java| 23 +--
 .../scheduler/fair/FSPreemptionThread.java  | 68 ++--
 .../fair/TestFairSchedulerPreemption.java   | 37 ---
 3 files changed, 93 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bde3aed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index e711229..43daace 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -588,7 +588,8 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 }
   }
 
-  boolean canContainerBePreempted(RMContainer container) {
+  boolean canContainerBePreempted(RMContainer container,
+  Resource alreadyConsideringForPreemption) {
 if (!isPreemptable()) {
   return false;
 }
@@ -610,6 +611,15 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
 // Check if the app's allocation will be over its fairshare even
 // after preempting this container
+Resource usageAfterPreemption = getUsageAfterPreemptingContainer(
+container.getAllocatedResource(),
+alreadyConsideringForPreemption);
+
+return !isUsageBelowShare(usageAfterPreemption, getFairShare());
+  }
+
+  private Resource getUsageAfterPreemptingContainer(Resource 
containerResources,
+  Resource alreadyConsideringForPreemption) {
 Resource usageAfterPreemption = Resources.clone(getResourceUsage());
 
 // Subtract resources of containers already queued for preemption
@@ -617,10 +627,13 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   Resources.subtractFrom(usageAfterPreemption, resourcesToBePreempted);
 }
 
-// Subtract this container's allocation to compute usage after preemption
-Resources.subtractFrom(
-usageAfterPreemption, container.getAllocatedResource());
-return !isUsageBelowShare(usageAfterPreemption, getFairShare());
+// Subtract resources of this container and other containers of this app
+// that the FSPreemptionThread is already considering for preemption.
+Resources.subtractFrom(usageAfterPreemption, containerResources);
+Resources.subtractFrom(usageAfterPreemption,
+alreadyConsideringForPreemption);
+
+return usageAfterPreemption;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bde3aed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index b3e59c5..47e580d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ 

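The essence of the fix: resources already being considered for preemption in
the current pass are subtracted before the fair-share check, so preempting
several containers of the same app cannot each individually look safe. A
simplified sketch of that accounting, with plain longs standing in for
YARN's Resource arithmetic:

public class PreemptionCheckDemo {
  // Would the app still be at or above its fair share after losing this
  // container plus everything already queued or under consideration?
  static boolean canPreempt(long currentUsage, long fairShare,
      long thisContainer, long alreadyQueued, long alreadyConsidering) {
    long usageAfter =
        currentUsage - alreadyQueued - thisContainer - alreadyConsidering;
    return usageAfter >= fairShare;
  }

  public static void main(String[] args) {
    // Usage 10 against a fair share of 8: one 2-unit container is safe...
    System.out.println(canPreempt(10, 8, 2, 0, 0));  // true
    // ...but a second one, with the first already under consideration,
    // would push the app below its share. Pre-patch code missed this.
    System.out.println(canPreempt(10, 8, 2, 0, 2));  // false
  }
}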
[38/50] [abbrv] hadoop git commit: MAPREDUCE-7014. Fix java doc errors in jdk1.8. Contributed by Steve Loughran.

2017-11-28 Thread kkaranasos
MAPREDUCE-7014. Fix java doc errors in jdk1.8. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cd75845
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cd75845
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cd75845

Branch: refs/heads/YARN-6592
Commit: 3cd75845da1aced3d88e0ce68c68e8d95f48fb79
Parents: 2bde3ae
Author: Rohith Sharma K S 
Authored: Mon Nov 27 22:01:00 2017 +0530
Committer: Rohith Sharma K S 
Committed: Mon Nov 27 22:01:00 2017 +0530

--
 .../lib/output/PathOutputCommitterFactory.java  | 12 ++--
 .../src/main/java/org/apache/hadoop/fs/s3a/Invoker.java |  4 +++-
 .../java/org/apache/hadoop/fs/s3a/S3AFileSystem.java|  2 +-
 .../main/java/org/apache/hadoop/fs/s3a/S3AUtils.java|  4 
 .../org/apache/hadoop/fs/s3a/WriteOperationHelper.java  |  1 +
 .../hadoop/fs/s3a/commit/AbstractS3ACommitter.java  |  1 +
 .../apache/hadoop/fs/s3a/commit/CommitOperations.java   |  2 +-
 .../hadoop/fs/s3a/commit/staging/StagingCommitter.java  |  1 +
 .../hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java|  2 +-
 9 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cd75845/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
index 0df14d1..7d214f2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
@@ -39,12 +39,12 @@ import org.apache.hadoop.util.ReflectionUtils;
  *
  * Algorithm:
  * <ol>
- *   <li>If an explicit committer factory is named, it is used.
- *   <li>The output path is examined.
+ *   <li>If an explicit committer factory is named, it is used.</li>
+ *   <li>The output path is examined.
  *   If is non null and there is an explicit schema for that filesystem,
- *   its factory is instantiated.
- *   <li>Otherwise, an instance of {@link FileOutputCommitter} is
- *   created.
+ *   its factory is instantiated.</li>
+ *   <li>Otherwise, an instance of {@link FileOutputCommitter} is
+ *   created.</li>
  * </ol>
  *
  * In {@link FileOutputFormat}, the created factory has its method
@@ -186,7 +186,7 @@ public class PathOutputCommitterFactory extends Configured {
   }
 
   /**
-   * Create the committer factory for a task attempt & destination, then
+   * Create the committer factory for a task attempt and destination, then
* create the committer from it.
* @param outputPath the task's output path, or null if no output path
* has been defined.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cd75845/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
index 9900f4c..107a247 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.io.retry.RetryPolicy;
  *
  * The static {@link #quietly(String, String, VoidOperation)} and
  * {@link #quietlyEval(String, String, Operation)} calls exist to take any
- * operation and quietly catch & log at debug. The return value of
+ * operation and quietly catch and log at debug. The return value of
  * {@link #quietlyEval(String, String, Operation)} is a java 8 optional,
  * which can then be used in java8-expressions.
  */
@@ -390,9 +390,11 @@ public class Invoker {
* Execute an operation; any exception raised is caught and
* logged at debug.
* The result is only non-empty if the operation succeeded
+   * @param <T> type to return
* @param action action to execute
* @param path path (for exception construction)
* @param operation operation
+   * @return the result of a successful operation
*/
   public static <T> Optional<T> quietlyEval(String action,
   String 

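The javadoc fixes in this patch are all of one shape: JDK 8's doclint
rejects raw '&' characters and malformed HTML in comments, so they are
replaced with words, entities, or properly closed tags. A minimal
illustration (the class name is invented):

public class DoclintDemo {
  // Fails `javadoc` under JDK 8 doclint ("bad HTML entity"):
  //   /** Create the factory for a task attempt & destination. */

  /** Create the factory for a task attempt and destination. */
  void passes() { }
}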
[47/50] [abbrv] hadoop git commit: YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos via wangda)

2017-11-28 Thread kkaranasos
YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3571634b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3571634b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3571634b

Branch: refs/heads/YARN-6592
Commit: 3571634b06003d40b9ff5e6235515740d69ded53
Parents: ccf07e9
Author: Wangda Tan 
Authored: Mon Oct 30 16:54:02 2017 -0700
Committer: Konstantinos Karanasos 
Committed: Tue Nov 28 13:46:29 2017 -0800

--
 .../hadoop/yarn/api/records/ResourceSizing.java |  64 +
 .../yarn/api/records/SchedulingRequest.java | 205 ++
 .../src/main/proto/yarn_protos.proto|  14 +
 .../records/impl/pb/ResourceSizingPBImpl.java   | 117 
 .../impl/pb/SchedulingRequestPBImpl.java| 266 +++
 5 files changed, 666 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3571634b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
new file mode 100644
index 000..d82be11
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@code ResourceSizing} contains information for the size of a
+ * {@link SchedulingRequest}, such as the number of requested allocations and
+ * the resources for each allocation.
+ */
+@Public
+@Unstable
+public abstract class ResourceSizing {
+
+  @Public
+  @Unstable
+  public static ResourceSizing newInstance(Resource resources) {
+return ResourceSizing.newInstance(1, resources);
+  }
+
+  @Public
+  @Unstable
+  public static ResourceSizing newInstance(int numAllocations, Resource 
resources) {
+ResourceSizing resourceSizing = Records.newRecord(ResourceSizing.class);
+resourceSizing.setNumAllocations(numAllocations);
+resourceSizing.setResources(resources);
+return resourceSizing;
+  }
+
+  @Public
+  @Unstable
+  public abstract int getNumAllocations();
+
+  @Public
+  @Unstable
+  public abstract void setNumAllocations(int numAllocations);
+
+  @Public
+  @Unstable
+  public abstract Resource getResources();
+
+  @Public
+  @Unstable
+  public abstract void setResources(Resource resources);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3571634b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
new file mode 100644
index 000..47a0697
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this 

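A small sketch of using the factory methods above;
Resource.newInstance(memoryMB, vcores) is the long-standing YARN records API:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;

public class SizingDemo {
  static ResourceSizing tenSmallAllocations() {
    // Ten allocations of 1 GB and one vcore each.
    return ResourceSizing.newInstance(10, Resource.newInstance(1024, 1));
  }

  static ResourceSizing oneDefaultAllocation() {
    // The single-argument overload defaults numAllocations to 1.
    return ResourceSizing.newInstance(Resource.newInstance(2048, 2));
  }
}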
[31/50] [abbrv] hadoop git commit: YARN-7524. Remove unused FairSchedulerEventLog. (Contributed by Wilfred Spiegelenburg)

2017-11-28 Thread kkaranasos
YARN-7524. Remove unused FairSchedulerEventLog. (Contributed by Wilfred 
Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc9479d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc9479d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc9479d

Branch: refs/heads/YARN-6592
Commit: 4cc9479dae2bfb7d14d29b55d103eea9fa35a586
Parents: 738d1a2
Author: Yufei Gu 
Authored: Wed Nov 22 14:18:36 2017 -0800
Committer: Yufei Gu 
Committed: Wed Nov 22 14:18:36 2017 -0800

--
 .../scheduler/fair/FairScheduler.java   |   8 -
 .../fair/FairSchedulerConfiguration.java|  16 --
 .../scheduler/fair/FairSchedulerEventLog.java   | 152 ---
 .../fair/TestFairSchedulerEventLog.java |  83 --
 4 files changed, 259 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9479d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index b2978d4..661d0a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -177,7 +177,6 @@ public class FairScheduler extends
   protected double rackLocalityThreshold; // Cluster threshold for rack 
locality
   protected long nodeLocalityDelayMs; // Delay for node locality
   protected long rackLocalityDelayMs; // Delay for rack locality
-  private FairSchedulerEventLog eventLog; // Machine-readable event log
   protected boolean assignMultiple; // Allocate multiple containers per
 // heartbeat
   @VisibleForTesting
@@ -404,10 +403,6 @@ public class FairScheduler extends
 return continuousSchedulingSleepMs;
   }
 
-  public FairSchedulerEventLog getEventLog() {
-return eventLog;
-  }
-
   /**
* Add a new application to the scheduler, with a given id, queue name, and
* user. This will accept a new app even if the user or queue is above
@@ -875,7 +870,6 @@ public class FairScheduler extends
 try {
   writeLock.lock();
   long start = getClock().getTime();
-  eventLog.log("HEARTBEAT", nm.getHostName());
   super.nodeUpdate(nm);
 
   FSSchedulerNode fsNode = getFSSchedulerNode(nm.getNodeID());
@@ -1284,8 +1278,6 @@ public class FairScheduler extends
 
   // This stores per-application scheduling information
   this.applications = new ConcurrentHashMap<>();
-  this.eventLog = new FairSchedulerEventLog();
-  eventLog.init(this.conf);
 
   allocConf = new AllocationConfiguration(conf);
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9479d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 9c9eee6..38e71a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -17,7 +17,6 @@
 */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.io.File;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -64,12 +63,6 @@ public class FairSchedulerConfiguration extends 

[23/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
new file mode 100644
index 000..b3bcca1
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit;
+
+import java.io.IOException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory;
+
+/**
+ * Dynamically create the output committer based on subclass type and settings.
+ */
+public abstract class AbstractS3ACommitterFactory
+extends PathOutputCommitterFactory {
+  public static final Logger LOG = LoggerFactory.getLogger(
+  AbstractS3ACommitterFactory.class);
+
+  @Override
+  public PathOutputCommitter createOutputCommitter(Path outputPath,
+  TaskAttemptContext context) throws IOException {
+FileSystem fs = getDestinationFileSystem(outputPath, context);
+PathOutputCommitter outputCommitter;
+if (fs instanceof S3AFileSystem) {
+  outputCommitter = createTaskCommitter((S3AFileSystem)fs,
+  outputPath, context);
+} else {
+  throw new PathCommitException(outputPath,
+  "Filesystem not supported by this committer");
+}
+LOG.info("Using Commmitter {} for {}",
+outputCommitter,
+outputPath);
+return outputCommitter;
+  }
+
+  /**
+   * Get the destination filesystem, returning null if there is none.
+   * Code using this must explicitly or implicitly look for a null value
+   * in the response.
+   * @param outputPath output path
+   * @param context job/task context
+   * @return the destination filesystem, if it can be determined
+   * @throws IOException if the FS cannot be instantiated
+   */
+  protected FileSystem getDestinationFileSystem(Path outputPath,
+  JobContext context)
+  throws IOException {
+return outputPath != null ?
+  FileSystem.get(outputPath.toUri(), context.getConfiguration())
+  : null;
+  }
+
+  /**
+   * Implementation point: create a task committer for a specific filesystem.
+   * @param fileSystem destination FS.
+   * @param outputPath final output path for work
+   * @param context task context
+   * @return a committer
+   * @throws IOException any problem, including the FS not supporting
+   * the desired committer
+   */
+  public abstract PathOutputCommitter createTaskCommitter(
+  S3AFileSystem fileSystem,
+  Path outputPath,
+  TaskAttemptContext context) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
new file mode 100644
index 000..03cfcba
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use 

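A hypothetical concrete factory, showing the single extension point the
abstract class leaves open. DemoCommitterFactory is an invented name, not
one of the committers shipped by this patch:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitterFactory;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;

public class DemoCommitterFactory extends AbstractS3ACommitterFactory {
  @Override
  public PathOutputCommitter createTaskCommitter(S3AFileSystem fileSystem,
      Path outputPath, TaskAttemptContext context) throws IOException {
    // A real implementation returns a committer bound to this filesystem
    // and output path; this demo only marks the extension point.
    throw new UnsupportedOperationException(
        "illustrative only: return a PathOutputCommitter for " + outputPath);
  }
}

The base class already handles the instanceof S3AFileSystem check and the
error path, so subclasses stay one method long.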
[19/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md
new file mode 100644
index 000..c6dbf55
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md
@@ -0,0 +1,819 @@
+
+
+# Committing work to S3 with the "S3A Committers"
+
+
+
+This page covers the S3A Committers, which can commit work directly
+to an S3 object store.
+
+These committers are designed to solve a fundamental problem which
+the standard committers of work cannot solve for S3: consistent, high-performance,
+and reliable commitment of output to S3.
+
+For details on their internal design, see
+[S3A Committers: Architecture and 
Implementation](./committer_architecture.html).
+
+
+## Introduction: The Commit Problem
+
+
+Apache Hadoop MapReduce (and, behind the scenes, Apache Spark) often writes
+the output of its work to filesystems.
+
+Normally, Hadoop uses the `FileOutputFormatCommitter` to manage the
+promotion of files created in a single task attempt to the final output of
+a query. This is done in a way to handle failures of tasks and jobs, and to
+support speculative execution. It does that by listing directories and renaming
+their content into the final destination when tasks and then jobs are 
committed.
+
+This places some key requirements on the underlying filesystem:
+
+1. When you list a directory, you see all the files which have been created in 
it,
+and no files which are not in it (i.e. have been deleted).
+1. When you rename a directory, it is an `O(1)` atomic transaction. No other
+process across the cluster may rename a file or directory to the same path.
+If the rename fails for any reason, either the data is at the original 
location,
+or it is at the destination, in which case the rename actually succeeded.
+
+**The S3 object store and the `s3a://` filesystem client cannot meet these 
requirements.**
+
+1. Amazon S3 has inconsistent directory listings unless S3Guard is enabled.
+1. The S3A client mimics `rename()` by copying files and then deleting the originals.
+This can fail partway through, and there is nothing to prevent any other 
process
+in the cluster attempting a rename at the same time.
+
+As a result,
+
+* Files may not be listed, hence not renamed into place.
+* Deleted files may still be discovered, confusing the rename process to the 
point
+of failure.
+* If a rename fails, the data is left in an unknown state.
+* If more than one process attempts to commit work simultaneously, the output
+directory may contain the results of both processes: it is no longer an 
exclusive
+operation.
+* While S3Guard may deliver the listing consistency, commit time is still
+proportional to the amount of data created. It still can't handle task failure.
+
+**Using the "classic" `FileOutputCommmitter` to commit work to Amazon S3 risks
+loss or corruption of generated data**
+
+
+To address these problems there is now explicit support in the `hadoop-aws`
+module for committing work to Amazon S3 via the S3A filesystem client:
+*the S3A Committers*.
+
+
+For safe, as well as high-performance, output of work to S3,
+we need to use a committer explicitly written to work with S3, treating it as
+an object store with special features.
+
+
+### Background : Hadoop's "Commit Protocol"
+
+How exactly is work written to its final destination? That is accomplished by
+a "commit protocol" between the workers and the job manager.
+
+This protocol is implemented in Hadoop MapReduce, with a similar but extended
+version in Apache Spark:
+
+1. A "Job" is the entire query, with inputs to outputs
+1. The "Job Manager" is the process in charge of choreographing the execution
+of the job. It may perform some of the actual computation too.
+1. The job has "workers", which are processes which work the actual data
+and write the results.
+1. Workers execute "Tasks", which are fractions of the job, a job whose
+input has been *partitioned* into units of work which can be executed 
independently.
+1. The Job Manager directs workers to execute "tasks", usually trying to 
schedule
+the work close to the data (if the filesystem provides locality information).
+1. Workers can fail: the Job manager needs to detect this and reschedule their 
active tasks.
+1. Workers can also become separated from the Job Manager, a "network 
partition".
+It is (provably) impossible for the Job Manager to distinguish a 
running-but-unreachable
+worker from a failed one.
+1. The output of a failed task must not be visible; this is to avoid its
+data getting into the final output.
+1. Multiple workers can be instructed to evaluate the same partition of the 
work;
+this "speculation" delivers speedup as it can address the "straggler problem".

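A schematic of the promotion step this protocol relies on, written against
the generic Hadoop FileSystem API. On HDFS the rename below is a single
atomic metadata operation; on raw S3A it expands into copies and deletes,
which is exactly the gap the S3A committers close. The method and path
names are invented for the sketch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TaskCommitSketch {
  // Promote a task attempt's output directory to its final location.
  static void commitTask(Configuration conf, Path attemptDir, Path finalDir)
      throws IOException {
    FileSystem fs = attemptDir.getFileSystem(conf);
    // Requirement 1: listing attemptDir must show exactly what the task wrote.
    // Requirement 2: this rename must be an O(1) atomic transaction.
    if (!fs.rename(attemptDir, finalDir)) {
      throw new IOException("failed to commit " + attemptDir);
    }
  }
}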
[50/50] [abbrv] hadoop git commit: YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

2017-11-28 Thread kkaranasos
YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos 
via wangda)

Change-Id: Id00edb7185fdf01cce6e40f920cac3585f8cbe9c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccf07e95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccf07e95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccf07e95

Branch: refs/heads/YARN-6592
Commit: ccf07e959c1856f00fe40c3dfcadf262a910
Parents: 30941d9
Author: Wangda Tan 
Authored: Thu Aug 3 14:03:55 2017 -0700
Committer: Konstantinos Karanasos 
Committed: Tue Nov 28 13:46:29 2017 -0800

--
 .../yarn/api/resource/PlacementConstraint.java  | 567 +++
 .../yarn/api/resource/PlacementConstraints.java | 286 ++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../src/main/proto/yarn_protos.proto|  55 ++
 .../api/resource/TestPlacementConstraints.java  | 106 
 .../PlacementConstraintFromProtoConverter.java  | 116 
 .../pb/PlacementConstraintToProtoConverter.java | 174 ++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../yarn/api/records/impl/pb/ProtoUtils.java|  27 +
 .../PlacementConstraintTransformations.java | 209 +++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../TestPlacementConstraintPBConversion.java| 195 +++
 .../TestPlacementConstraintTransformations.java | 183 ++
 13 files changed, 1987 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccf07e95/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
new file mode 100644
index 000..f0e3982
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -0,0 +1,567 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code PlacementConstraint} represents a placement constraint for a resource
+ * allocation.
+ */
+@Public
+@Unstable
+public class PlacementConstraint {
+
+  /**
+   * The constraint expression tree.
+   */
+  private AbstractConstraint constraintExpr;
+
+  public PlacementConstraint(AbstractConstraint constraintExpr) {
+this.constraintExpr = constraintExpr;
+  }
+
+  /**
+   * Get the constraint expression of the placement constraint.
+   *
+   * @return the constraint expression
+   */
+  public AbstractConstraint getConstraintExpr() {
+return constraintExpr;
+  }
+
+  /**
+   * Interface used to enable the elements of the constraint tree to be 
visited.
+   */
+  @Private
+  public interface Visitable {
+/**
+ * Visitor pattern.
+ *
+ * @param visitor visitor to be used
+ * @param <T> defines the type that the visitor will use and the return 
type
+ *  of the accept.
+ * @return the result of visiting a given object.
+ */
+    <T> T accept(Visitor<T> visitor);
+
+  }
+
+  /**
+   * Visitor API for a constraint tree.
+   *
+   * @param <T> determines the return type of the visit methods.
+   */
+  @Private
+  public interface Visitor<T> {
+T visit(SingleConstraint constraint);
+
+T visit(TargetExpression target);
+
+T visit(TargetConstraint constraint);
+
+T visit(CardinalityConstraint constraint);
+
+T 

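The API above is the classic visitor pattern: each tree node implements
accept() by dispatching to the matching visit() overload, so new operations
over the constraint tree need no changes to the node classes. A
self-contained toy version with invented Leaf/Branch node types (the real
tree uses SingleConstraint, TargetExpression, and friends):

interface Visitor<T> {
  T visit(Leaf leaf);
  T visit(Branch branch);
}

interface Visitable {
  <T> T accept(Visitor<T> visitor);
}

class Leaf implements Visitable {
  public <T> T accept(Visitor<T> v) { return v.visit(this); }
}

class Branch implements Visitable {
  final Visitable left, right;
  Branch(Visitable l, Visitable r) { left = l; right = r; }
  public <T> T accept(Visitor<T> v) { return v.visit(this); }
}

class NodeCounter implements Visitor<Integer> {
  public Integer visit(Leaf leaf) { return 1; }
  public Integer visit(Branch b) {
    return 1 + b.left.accept(this) + b.right.accept(this);
  }
}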
[44/50] [abbrv] hadoop git commit: YARN-7480. Render tooltips on columns where text is clipped in new YARN UI. Contributed by Vasudevan Skm. This closes #293

2017-11-28 Thread kkaranasos
YARN-7480. Render tooltips on columns where text is clipped in new YARN UI. 
Contributed by Vasudevan Skm. This closes #293


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b76695f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b76695f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b76695f

Branch: refs/heads/YARN-6592
Commit: 6b76695f886d4db7287a0425d56d5e13daf5d08d
Parents: 641ba5c
Author: Sunil G 
Authored: Tue Nov 28 22:41:52 2017 +0530
Committer: Sunil G 
Committed: Tue Nov 28 22:41:52 2017 +0530

--
 .../app/components/em-table-tooltip-text.js | 33 +++
 .../webapp/app/controllers/app-table-columns.js |  4 ++
 .../components/em-table-tooltip-text.hbs| 26 
 .../components/em-table-tooltip-text-test.js| 43 
 4 files changed, 106 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
new file mode 100644
index 000..f363460
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ import Ember from 'ember';
+
+export default Ember.Component.extend({
+  content: null,
+
+  classNames: ["em-table-text-with-tooltip"],
+
+  didRender: function() {
+this.$().parent().css("position", "static");
+  },
+
+  tooltipText: Ember.computed("content", function () {
+return this.get("content");
+  }),
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index a87acc1..fb002f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -50,6 +50,7 @@ export default Ember.Controller.extend({
   }, {
   id: 'appName',
   headerTitle: 'Application Name',
+  cellComponentName: 'em-table-tooltip-text',
   contentPath: 'appName',
   facetType: null,
   }, {
@@ -66,6 +67,7 @@ export default Ember.Controller.extend({
   }, {
   id: 'queue',
   headerTitle: 'Queue',
+  cellComponentName: 'em-table-tooltip-text',
   contentPath: 'queue',
   }, {
   id: 'progress',
@@ -128,6 +130,7 @@ export default Ember.Controller.extend({
   headerTitle: 'Application ID',
   contentPath: 'id',
   facetType: null,
+  cellComponentName: 'em-table-tooltip-text',
   minWidth: "250px"
 }, {
   id: 'state',
@@ -160,6 +163,7 @@ export default Ember.Controller.extend({
 id: 'queue',
 headerTitle: 'Queue',
 contentPath: 'queue',
+cellComponentName: 'em-table-tooltip-text',
 }, {
   id: 'stTime',
   headerTitle: 'Started Time',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/em-table-tooltip-text.hbs
--
diff --git 

[25/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index f846689..96de8e4 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -22,17 +22,16 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.Callable;
+import java.util.Locale;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.event.ProgressEvent;
 import com.amazonaws.event.ProgressEventType;
 import com.amazonaws.event.ProgressListener;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
 import com.amazonaws.services.s3.model.PartETag;
 import com.amazonaws.services.s3.model.PutObjectRequest;
 import com.amazonaws.services.s3.model.PutObjectResult;
@@ -47,8 +46,9 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.fs.s3a.commit.CommitConstants;
+import org.apache.hadoop.fs.s3a.commit.PutTracker;
 import org.apache.hadoop.util.Progressable;
 
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
@@ -65,7 +65,8 @@ import static org.apache.hadoop.fs.s3a.Statistic.*;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-class S3ABlockOutputStream extends OutputStream {
+class S3ABlockOutputStream extends OutputStream implements
+StreamCapabilities {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(S3ABlockOutputStream.class);
@@ -87,14 +88,6 @@ class S3ABlockOutputStream extends OutputStream {
   private final ListeningExecutorService executorService;
 
   /**
-   * Retry policy for multipart commits; not all AWS SDK versions retry that.
-   */
-  private final RetryPolicy retryPolicy =
-  RetryPolicies.retryUpToMaximumCountWithProportionalSleep(
-  5,
-  2000,
-  TimeUnit.MILLISECONDS);
-  /**
* Factory for blocks.
*/
   private final S3ADataBlocks.BlockFactory blockFactory;
@@ -120,7 +113,12 @@ class S3ABlockOutputStream extends OutputStream {
   /**
* Write operation helper; encapsulation of the filesystem operations.
*/
-  private final S3AFileSystem.WriteOperationHelper writeOperationHelper;
+  private final WriteOperationHelper writeOperationHelper;
+
+  /**
+   * Track multipart put operation.
+   */
+  private final PutTracker putTracker;
 
   /**
* An S3A output stream which uploads partitions in a separate pool of
@@ -138,6 +136,7 @@ class S3ABlockOutputStream extends OutputStream {
* @param blockFactory factory for creating stream destinations
* @param statistics stats for this stream
* @param writeOperationHelper state of the write operation.
+   * @param putTracker put tracking for commit support
* @throws IOException on any problem
*/
   S3ABlockOutputStream(S3AFileSystem fs,
@@ -147,7 +146,8 @@ class S3ABlockOutputStream extends OutputStream {
   long blockSize,
   S3ADataBlocks.BlockFactory blockFactory,
   S3AInstrumentation.OutputStreamStatistics statistics,
-  S3AFileSystem.WriteOperationHelper writeOperationHelper)
+  WriteOperationHelper writeOperationHelper,
+  PutTracker putTracker)
   throws IOException {
 this.fs = fs;
 this.key = key;
@@ -155,6 +155,7 @@ class S3ABlockOutputStream extends OutputStream {
 this.blockSize = (int) blockSize;
 this.statistics = statistics;
 this.writeOperationHelper = writeOperationHelper;
+this.putTracker = putTracker;
 Preconditions.checkArgument(blockSize >= Constants.MULTIPART_MIN_SIZE,
 "Block size is too small: %d", blockSize);
 this.executorService = MoreExecutors.listeningDecorator(executorService);
@@ -166,7 +167,11 @@ class S3ABlockOutputStream extends OutputStream {
 // writes a 0-byte entry.
 createBlockIfNeeded();
 LOG.debug("Initialized S3ABlockOutputStream for {}" +
-" output to {}", writeOperationHelper, activeBlock);
+" output to {}", key, activeBlock);
+if (putTracker.initialize()) {
+  LOG.debug("Put tracker requests multipart upload");
+  

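Implementing StreamCapabilities lets callers probe a stream for optional
behavior before relying on it. A hedged sketch of the caller side; the
capability string passed in is a placeholder, since the real constant names
live in classes such as CommitConstants:

import java.io.OutputStream;

import org.apache.hadoop.fs.StreamCapabilities;

public class CapabilityCheck {
  static boolean supports(OutputStream out, String capability) {
    // Streams that don't implement the interface simply lack the capability.
    return out instanceof StreamCapabilities
        && ((StreamCapabilities) out).hasCapability(capability);
  }
}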
[40/50] [abbrv] hadoop git commit: YARN-7363. ContainerLocalizer don't have a valid log4j config in case of Linux container executor. (Contributed by Yufei Gu)

2017-11-28 Thread kkaranasos
YARN-7363. ContainerLocalizer don't have a valid log4j config in case of Linux 
container executor. (Contributed by Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8923cdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8923cdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8923cdb

Branch: refs/heads/YARN-6592
Commit: d8923cdbf1567aee10a54f144fef734d1465ebed
Parents: fedabca
Author: Yufei Gu 
Authored: Mon Nov 27 11:47:11 2017 -0800
Committer: Yufei Gu 
Committed: Mon Nov 27 14:31:52 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 +++
 .../src/main/resources/yarn-default.xml |  8 
 .../nodemanager/LinuxContainerExecutor.java | 28 +++-
 .../WindowsSecureContainerExecutor.java |  2 +-
 .../localizer/ContainerLocalizer.java   | 46 +++-
 .../TestLinuxContainerExecutorWithMocks.java| 19 +---
 6 files changed, 98 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8923cdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ead9977..c1024ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1675,6 +1675,12 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT =
   "-Xmx256m";
 
+  /** The log level of container localizer process. */
+  public static final String NM_CONTAINER_LOCALIZER_LOG_LEVEL =
+  NM_PREFIX + "container-localizer.log.level";
+  public static final String NM_CONTAINER_LOCALIZER_LOG_LEVEL_DEFAULT =
+  "INFO";
+
   /** Prefix for runtime configuration constants. */
   public static final String LINUX_CONTAINER_RUNTIME_PREFIX = NM_PREFIX +
   "runtime.linux.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8923cdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 12cb902..dd9c6bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1165,6 +1165,14 @@
 
   
 
+    <description>The log level for container localizer while it is an
+    independent process.</description>
+    <name>yarn.nodemanager.container-localizer.log.level</name>
+    <value>INFO</value>
+  </property>
+
+  <property>
+
   Where to store container logs. An application's localized log directory
   will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
   Individual containers' log directories will be below this, in 
directories 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8923cdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index e8c46a2..eaf664f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ConfigurationException;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;

[39/50] [abbrv] hadoop git commit: YARN-6168. Restarted RM may not inform AM about all existing containers. Contributed by Chandni Singh

2017-11-28 Thread kkaranasos
YARN-6168. Restarted RM may not inform AM about all existing containers. 
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fedabcad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fedabcad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fedabcad

Branch: refs/heads/YARN-6592
Commit: fedabcad42067ac7dd24de40fab6be2d3485a540
Parents: 3cd7584
Author: Jian He 
Authored: Mon Nov 27 09:55:08 2017 -0800
Committer: Jian He 
Committed: Mon Nov 27 10:19:58 2017 -0800

--
 .../api/protocolrecords/AllocateResponse.java   |  54 +++
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../impl/pb/AllocateResponsePBImpl.java |  37 +
 .../resourcemanager/DefaultAMSProcessor.java|   3 +
 .../scheduler/AbstractYarnScheduler.java|   4 +-
 .../resourcemanager/scheduler/Allocation.java   |  13 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  48 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |   5 +-
 .../scheduler/fair/FairScheduler.java   |   3 +-
 .../applicationsmanager/TestAMRestart.java  | 149 +++
 10 files changed, 310 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fedabcad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index 9b254ae..98346ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -372,6 +372,44 @@ public abstract class AllocateResponse {
   public void setUpdateErrors(List<UpdateContainerError> updateErrors) {
   }
 
+  /**
+   * Get the list of running containers as viewed by
+   * ResourceManager from previous application attempts which
+   * have not been reported to the Application Master yet.
+   * <p>
+   * These containers were recovered by the RM after the application master
+   * had already registered. This may happen after RM restart when some NMs get
+   * delayed in connecting to the RM and reporting the active containers.
+   * Since they were not reported in the registration
+   * response, they are reported in the response to the AM heartbeat.
+   *
+   * @return the list of running containers as viewed by
+   * ResourceManager from previous application attempts.
+   */
+  @Public
+  @Unstable
+  public abstract List<Container> getContainersFromPreviousAttempts();
+
+  /**
+   * Set the list of running containers as viewed by
+   * ResourceManager from previous application attempts which have
+   * not been reported to the Application Master yet.
+   * <p>
+   * These containers were recovered by the RM after the application master
+   * had already registered. This may happen after RM restart when some NMs get
+   * delayed in connecting to the RM and reporting the active containers.
+   * Since they were not reported in the registration
+   * response, they are reported in the response to the AM heartbeat.
+   *
+   * @param containersFromPreviousAttempt
+   *  the list of running containers as viewed by
+   *  ResourceManager from previous application attempts.
+   */
+  @Private
+  @Unstable
+  public abstract void setContainersFromPreviousAttempts(
+      List<Container> containersFromPreviousAttempt);
+
   @Private
   @Unstable
   public static AllocateResponseBuilder newBuilder() {
@@ -590,6 +628,22 @@ public abstract class AllocateResponse {
 }
 
 /**
+ * Set the containersFromPreviousAttempt of the response.
+ * @see AllocateResponse#setContainersFromPreviousAttempts(List)
+ * @param containersFromPreviousAttempt
+ * containersFromPreviousAttempt of the response
+ * @return {@link AllocateResponseBuilder}
+ */
+@Private
+@Unstable
+public AllocateResponseBuilder containersFromPreviousAttempt(
+        List<Container> containersFromPreviousAttempt) {
+  allocateResponse.setContainersFromPreviousAttempts(
+  containersFromPreviousAttempt);
+  return this;
+}
+
+/**
  * Return generated {@link AllocateResponse} object.
  * @return {@link AllocateResponse}
  */
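
For context, a minimal sketch of how an ApplicationMaster might consume the new field on each heartbeat. This is an illustration, not part of the patch: the abstract class and trackRecoveredContainer() are hypothetical, while getContainersFromPreviousAttempts() is the accessor added above.

import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;

/** Illustrative AM-side handler; not part of YARN-6168 itself. */
public abstract class RecoveredContainerHandler {

  /** Hypothetical hook into the AM's own container bookkeeping. */
  protected abstract void trackRecoveredContainer(Container container);

  /** Call once per AM heartbeat with the RM's response. */
  public void onHeartbeat(AllocateResponse response) {
    List<Container> recovered = response.getContainersFromPreviousAttempts();
    for (Container c : recovered) {
      // Such containers are reported at most once, so record them here
      // to avoid rescheduling work that is already running on them.
      trackRecoveredContainer(c);
    }
  }
}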


[11/50] [abbrv] hadoop git commit: YARN-7531. ResourceRequest.equals does not check ExecutionTypeRequest.enforceExecutionType().

2017-11-28 Thread kkaranasos
YARN-7531. ResourceRequest.equals does not check ExecutionTypeRequest.enforceExecutionType().


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67bbbe1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67bbbe1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67bbbe1c

Branch: refs/heads/YARN-6592
Commit: 67bbbe1c0c05fa01b08a8dabe93c146935420450
Parents: 0ed44f2
Author: Haibo Chen 
Authored: Fri Nov 17 14:30:43 2017 -0800
Committer: Haibo Chen 
Committed: Tue Nov 21 09:09:16 2017 -0800

--
 .../yarn/api/records/ResourceRequest.java   |  3 +-
 .../hadoop/yarn/api/TestResourceRequest.java| 47 
 2 files changed, 48 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67bbbe1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
index beb3380..e46647a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -630,8 +630,7 @@ public abstract class ResourceRequest implements Comparable<ResourceRequest> {
   if (other.getExecutionTypeRequest() != null) {
 return false;
   }
-} else if (!execTypeRequest.getExecutionType()
-.equals(other.getExecutionTypeRequest().getExecutionType())) {
+} else if (!execTypeRequest.equals(other.getExecutionTypeRequest())) {
   return false;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67bbbe1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourceRequest.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourceRequest.java
new file mode 100644
index 000..aef838c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourceRequest.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * The class to test {@link ResourceRequest}.
+ */
+public class TestResourceRequest {
+
+  @Test
+  public void testEqualsOnExecutionTypeRequest() {
+ResourceRequest resourceRequestA =
+ResourceRequest.newInstance(Priority.newInstance(0), "localhost",
+Resource.newInstance(1024, 1), 1, false, "",
+ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED, true));
+
+ResourceRequest resourceRequestB =
+ResourceRequest.newInstance(Priority.newInstance(0), "localhost",
+Resource.newInstance(1024, 1), 1, false, "",
+ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED, false));
+
+Assert.assertFalse(resourceRequestA.equals(resourceRequestB));
+  }
+}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[45/50] [abbrv] hadoop git commit: YARN-6647. RM can crash during transitionToStandby due to InterruptedException. Contributed by Bibin A Chundatt

2017-11-28 Thread kkaranasos
YARN-6647. RM can crash during transitionToStandby due to InterruptedException. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2c7a73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2c7a73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2c7a73e

Branch: refs/heads/YARN-6592
Commit: a2c7a73e33045ce42cce19aacbe45c0421a61994
Parents: 6b76695
Author: Jason Lowe 
Authored: Tue Nov 28 11:10:18 2017 -0600
Committer: Jason Lowe 
Committed: Tue Nov 28 11:15:44 2017 -0600

--
 .../RMDelegationTokenSecretManager.java | 42 ++--
 1 file changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c7a73e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
index 53cc471..37cd741 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
@@ -82,14 +82,21 @@ public class RMDelegationTokenSecretManager extends
 return new RMDelegationTokenIdentifier();
   }
 
+  private boolean shouldIgnoreException(Exception e) {
+return !running && e.getCause() instanceof InterruptedException;
+  }
+
   @Override
   protected void storeNewMasterKey(DelegationKey newKey) {
 try {
   LOG.info("storing master key with keyID " + newKey.getKeyId());
   rm.getRMContext().getStateStore().storeRMDTMasterKey(newKey);
 } catch (Exception e) {
-  LOG.error("Error in storing master key with KeyID: " + 
newKey.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error(
+"Error in storing master key with KeyID: " + newKey.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -99,8 +106,10 @@ public class RMDelegationTokenSecretManager extends
   LOG.info("removing master key with keyID " + key.getKeyId());
   rm.getRMContext().getStateStore().removeRMDTMasterKey(key);
 } catch (Exception e) {
-  LOG.error("Error in removing master key with KeyID: " + key.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in removing master key with KeyID: " + 
key.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -113,9 +122,11 @@ public class RMDelegationTokenSecretManager extends
   rm.getRMContext().getStateStore().storeRMDelegationToken(identifier,
   renewDate);
 } catch (Exception e) {
-  LOG.error("Error in storing RMDelegationToken with sequence number: "
-  + identifier.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in storing RMDelegationToken with sequence number: "
++ identifier.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -127,9 +138,11 @@ public class RMDelegationTokenSecretManager extends
   + id.getSequenceNumber());
   rm.getRMContext().getStateStore().updateRMDelegationToken(id, renewDate);
 } catch (Exception e) {
-  LOG.error("Error in updating persisted RMDelegationToken" +
-" with sequence number: " + id.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in updating persisted RMDelegationToken"
++ " with sequence number: " + id.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -141,9 +154,12 @@ public class RMDelegationTokenSecretManager extends
   + ident.getSequenceNumber());
   rm.getRMContext().getStateStore().removeRMDelegationToken(ident);
 } catch (Exception e) {
-  LOG.error("Error in removing RMDelegationToken with sequence number: "
-  + ident.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if 
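
The hunks above all apply one pattern: swallow a state-store failure only when the secret manager has been stopped and the failure is interrupt-induced, and otherwise keep the fail-fast exit. A condensed, self-contained sketch of that pattern (class, field, and method names are illustrative, not the actual RMDelegationTokenSecretManager code):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ShutdownAwareStore {
  private static final Logger LOG =
      LoggerFactory.getLogger(ShutdownAwareStore.class);
  private volatile boolean running = true;

  /** During an orderly stop, interrupt-induced store failures are benign. */
  private boolean shouldIgnoreException(Exception e) {
    return !running && e.getCause() instanceof InterruptedException;
  }

  public void stop() {
    running = false;
  }

  public void store(Runnable storeOp, String what) {
    try {
      storeOp.run();
    } catch (Exception e) {
      if (!shouldIgnoreException(e)) {
        LOG.error("Error in storing " + what, e);
        // The real code then calls ExitUtil.terminate(1, e) to fail fast;
        // omitted here so the sketch runs standalone.
      }
    }
  }
}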

[12/50] [abbrv] hadoop git commit: YARN-7513. Remove the scheduler lock in FSAppAttempt.getWeight() (Contributed by Wilfred Spiegelenburg)

2017-11-28 Thread kkaranasos
YARN-7513. Remove the scheduler lock in FSAppAttempt.getWeight() (Contributed by Wilfred Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03c311ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03c311ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03c311ea

Branch: refs/heads/YARN-6592
Commit: 03c311eae3ad591630a452921172a4406dbda181
Parents: 67bbbe1
Author: yufei 
Authored: Tue Nov 21 10:33:34 2017 -0800
Committer: yufei 
Committed: Tue Nov 21 10:33:34 2017 -0800

--
 .../resourcemanager/scheduler/fair/FSAppAttempt.java  | 14 --
 1 file changed, 4 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03c311ea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 94991eb..e711229 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -1304,20 +1304,14 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
 
   @Override
   public float getWeight() {
-double weight = 1.0;
+float weight = 1.0F;
 
 if (scheduler.isSizeBasedWeight()) {
-  scheduler.getSchedulerReadLock().lock();
-
-  try {
-// Set weight based on current memory demand
-weight = Math.log1p(getDemand().getMemorySize()) / Math.log(2);
-  } finally {
-scheduler.getSchedulerReadLock().unlock();
-  }
+  // Set weight based on current memory demand
+  weight = (float)(Math.log1p(demand.getMemorySize()) / Math.log(2));
 }
 
-return (float)weight * this.getPriority().getPriority();
+return weight * appPriority.getPriority();
   }
 
   @Override
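
A worked example of the now lock-free computation (standalone arithmetic only; the 8 GB demand and priority values are illustrative):

public class WeightExample {
  public static void main(String[] args) {
    long demandMB = 8192;   // app's current memory demand, in MB
    int appPriority = 1;
    // Same formula as getWeight() above: log2(1 + demand).
    float weight = (float) (Math.log1p(demandMB) / Math.log(2));
    // Prints ~13.0: log1p(8192)/log(2) = ln(8193)/ln(2) ~= 13.0.
    System.out.println(weight * appPriority);
  }
}

Because the method now only reads the demand snapshot and does pure math, dropping the scheduler read lock removes contention without changing the computed weight.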


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java
--
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java
new file mode 100644
index 000..47d112d
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+import static org.apache.hadoop.fs.s3a.commit.MagicCommitPaths.*;
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+
+/**
+ * Tests for {@link MagicCommitPaths} path operations.
+ */
+public class TestMagicCommitPaths extends Assert {
+
+  private static final List<String> MAGIC_AT_ROOT =
+      list(MAGIC);
+  private static final List<String> MAGIC_AT_ROOT_WITH_CHILD =
+      list(MAGIC, "child");
+  private static final List<String> MAGIC_WITH_CHILD =
+      list("parent", MAGIC, "child");
+  private static final List<String> MAGIC_AT_WITHOUT_CHILD =
+      list("parent", MAGIC);
+
+  private static final List<String> DEEP_MAGIC =
+      list("parent1", "parent2", MAGIC, "child1", "child2");
+
+  public static final String[] EMPTY = {};
+
+  @Test
+  public void testSplitPathEmpty() throws Throwable {
+intercept(IllegalArgumentException.class,
+() -> splitPathToElements(new Path("")));
+  }
+
+  @Test
+  public void testSplitPathDoubleBackslash() {
+assertPathSplits("//", EMPTY);
+  }
+
+  @Test
+  public void testSplitRootPath() {
+assertPathSplits("/", EMPTY);
+  }
+
+  @Test
+  public void testSplitBasic() {
+assertPathSplits("/a/b/c",
+new String[]{"a", "b", "c"});
+  }
+
+  @Test
+  public void testSplitTrailingSlash() {
+assertPathSplits("/a/b/c/",
+new String[]{"a", "b", "c"});
+  }
+
+  @Test
+  public void testSplitShortPath() {
+assertPathSplits("/a",
+new String[]{"a"});
+  }
+
+  @Test
+  public void testSplitShortPathTrailingSlash() {
+assertPathSplits("/a/",
+new String[]{"a"});
+  }
+
+  @Test
+  public void testParentsMagicRoot() {
+assertParents(EMPTY, MAGIC_AT_ROOT);
+  }
+
+  @Test
+  public void testChildrenMagicRoot() {
+assertChildren(EMPTY, MAGIC_AT_ROOT);
+  }
+
+  @Test
+  public void testParentsMagicRootWithChild() {
+assertParents(EMPTY, MAGIC_AT_ROOT_WITH_CHILD);
+  }
+
+  @Test
+  public void testChildMagicRootWithChild() {
+assertChildren(a("child"), MAGIC_AT_ROOT_WITH_CHILD);
+  }
+
+  @Test
+  public void testChildrenMagicWithoutChild() {
+assertChildren(EMPTY, MAGIC_AT_WITHOUT_CHILD);
+  }
+
+  @Test
+  public void testChildMagicWithChild() {
+assertChildren(a("child"), MAGIC_WITH_CHILD);
+  }
+
+  @Test
+  public void testParentMagicWithChild() {
+assertParents(a("parent"), MAGIC_WITH_CHILD);
+  }
+
+  @Test
+  public void testParentDeepMagic() {
+assertParents(a("parent1", "parent2"), DEEP_MAGIC);
+  }
+
+  @Test
+  public void testChildrenDeepMagic() {
+assertChildren(a("child1", "child2"), DEEP_MAGIC);
+  }
+
+  @Test
+  public void testLastElementEmpty() throws Throwable {
+intercept(IllegalArgumentException.class,
+() -> lastElement(new ArrayList<>(0)));
+  }
+
+  @Test
+  public void testLastElementSingle() {
+assertEquals("first", lastElement(l("first")));
+  }
+
+  @Test
+  public void testLastElementDouble() {
+assertEquals("2", lastElement(l("first", "2")));
+  }
+
+  @Test
+  public void testFinalDestinationNoMagic() {
+assertEquals(l("first", "2"),
+finalDestination(l("first", "2")));
+  }
+
+  @Test
+  public void testFinalDestinationMagic1() {
+assertEquals(l("first", "2"),
+finalDestination(l("first", MAGIC, 

[35/50] [abbrv] hadoop git commit: HADOOP-15067. GC time percentage reported in JvmMetrics should be a gauge, not counter. Contributed by Misha Dmitriev.

2017-11-28 Thread kkaranasos
HADOOP-15067. GC time percentage reported in JvmMetrics should be a gauge, not counter. Contributed by Misha Dmitriev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d162252d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d162252d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d162252d

Branch: refs/heads/YARN-6592
Commit: d162252d7a7223631ff66ba0210953296407e55f
Parents: b46ca7e
Author: Xiao Chen 
Authored: Thu Nov 23 09:00:59 2017 -0800
Committer: Xiao Chen 
Committed: Thu Nov 23 09:01:28 2017 -0800

--
 .../main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java| 2 +-
 .../java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d162252d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
index 8c3375f..5f9afdd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
@@ -188,7 +188,7 @@ public class JvmMetrics implements MetricsSource {
 }
 
 if (gcTimeMonitor != null) {
-  rb.addCounter(GcTimePercentage,
+  rb.addGauge(GcTimePercentage,
   gcTimeMonitor.getLatestGcData().getGcTimePercentage());
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d162252d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
index 5320b6e..aa1b009 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
@@ -101,7 +101,7 @@ public class TestJvmMetrics {
 verify(rb).tag(SessionId, "test");
 for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
   if (info.name().equals("GcTimePercentage")) {
-verify(rb).addCounter(eq(info), anyInt());
+verify(rb).addGauge(eq(info), anyInt());
   }
 }
   }
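
The distinction matters to downstream consumers: a counter is contractually monotonic (aggregators compute deltas and treat any decrease as a process restart), while a gauge simply reports the latest value. GC time percentage rises and falls, so only the gauge contract fits. A tiny standalone illustration with made-up sample values:

public class GaugeVsCounter {
  public static void main(String[] args) {
    // Illustrative GC-time percentages over three monitoring intervals.
    int[] gcTimePercentage = {40, 5, 12};
    // Published as a counter, the drop from 40 to 5 would look like a
    // counter reset; published as a gauge it is just the newest reading.
    for (int sample : gcTimePercentage) {
      System.out.println("GcTimePercentage (gauge reading): " + sample);
    }
  }
}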


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. Contributed by Lei (Eddy) Xu.

2017-11-28 Thread kkaranasos
HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/785732c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/785732c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/785732c1

Branch: refs/heads/YARN-6592
Commit: 785732c13e2ebe9f27350b6be82eb2fb782d7dc4
Parents: d42a336
Author: Lei Xu 
Authored: Wed Nov 22 10:19:58 2017 -0800
Committer: Lei Xu 
Committed: Wed Nov 22 10:22:32 2017 -0800

--
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6293 -> 6753 bytes
 .../src/test/resources/editsStored.xml  | 750 +++
 2 files changed, 423 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/785732c1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index 8029575..3f2817a 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/785732c1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 0a1c25e..2a57c73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -13,8 +13,8 @@
   2
   
 1
-1423097579620
-ef3f2032e2797e8e
+1512000829976
+e7457bcc6ab95a84
   
 
   
@@ -24,8 +24,8 @@
   3
   
 2
-1423097579622
-b978ed731a0b4a65
+1512000829980
+07cc38caf6c47bb4
   
 
   
@@ -37,19 +37,19 @@
   16386
   /file_create
   1
-  1422406380345
-  1422406380345
+  1511309632199
+  1511309632199
   512
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   true
   
-xyao
+lei
 supergroup
 420
   
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  6
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  5
 
   
   
@@ -60,14 +60,14 @@
   0
   /file_create
   1
-  1422406380369
-  1422406380345
+  1511309632248
+  1511309632199
   512
   
   
   false
   
-xyao
+lei
 supergroup
 420
   
@@ -78,11 +78,11 @@
 
   6
   /file_create
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   false
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  8
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  7
 
   
   
@@ -93,23 +93,118 @@
   0
   /file_create
   1
-  1422406380376
-  1422406380345
+  1511309632263
+  1511309632199
   512
   
   
   false
   
-xyao
+lei
 supergroup
 420
   
 
   
   
-OP_SET_STORAGE_POLICY
+OP_ADD
 
   8
+  0
+  16387
+  /update_blocks
+  1
+  1511309632266
+  1511309632266
+  4096
+  DFSClient_NONMAPREDUCE_2134933941_1
+  127.0.0.1
+  true
+  
+lei
+supergroup
+420
+  
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  9
+
+  
+  
+OP_ALLOCATE_BLOCK_ID
+
+  9
+  1073741825
+
+  
+  
+OP_SET_GENSTAMP_V2
+
+  10
+  1001
+
+  
+  
+OP_ADD_BLOCK
+
+  11
+  /update_blocks
+  
+1073741825
+0
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  12
+  /update_blocks
+  
+1073741825
+1
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  13
+  /update_blocks
+  
+  -2
+
+  
+  
+OP_CLOSE
+
+  14
+  0
+  0
+  /update_blocks
+  1
+  1511309632454
+  1511309632266
+  4096
+  
+  
+  false
+  
+lei
+supergroup
+420
+  
+
+  
+  
+OP_SET_STORAGE_POLICY
+
+  15
   /file_create
   7
 
@@ -117,36 +212,36 @@
   
 OP_RENAME_OLD
 
-  9
+  16
   0
   /file_create
   /file_moved
-  1422406380383
-  

[17/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java
--
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java
new file mode 100644
index 000..4d7f524
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java
@@ -0,0 +1,1371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+import org.apache.hadoop.mapreduce.task.JobContextImpl;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Test the job/task commit actions of an S3A Committer, including trying to
+ * simulate some failure and retry conditions.
+ * Derived from
+ * {@code org.apache.hadoop.mapreduce.lib.output.TestFileOutputCommitter}.
+ *
+ * This is a complex test suite as it tries to explore the full lifecycle
+ * of committers, and is designed for subclassing.
+ */
+@SuppressWarnings({"unchecked", "ThrowableNotThrown", "unused"})
+public abstract class AbstractITCommitProtocol extends AbstractCommitITest {
+  private Path outDir;
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AbstractITCommitProtocol.class);
+
+  private static final String SUB_DIR = "SUB_DIR";
+
+  protected static final String PART_0 = "part-m-0";
+
+  /**
+   * Counter to guarantee that even in parallel test runs, no job has the same
+   * ID.
+   */
+
+  private String jobId;
+
+  // A random task attempt id for testing.
+  private String attempt0;
+  private TaskAttemptID taskAttempt0;
+
+  private String attempt1;
+  private TaskAttemptID taskAttempt1;
+
+  private static final Text KEY_1 = new Text("key1");
+  private static final Text KEY_2 = new Text("key2");
+  private static final Text VAL_1 = new Text("val1");
+  private static final Text VAL_2 = new Text("val2");
+
+  /** A job to abort in test case teardown. */
+  

[10/50] [abbrv] hadoop git commit: HADOOP-15046. Document Apache Hadoop does not support Java 9 in BUILDING.txt. Contributed by Hanisha Koneru.

2017-11-28 Thread kkaranasos
HADOOP-15046. Document Apache Hadoop does not support Java 9 in BUILDING.txt. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ed44f25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ed44f25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ed44f25

Branch: refs/heads/YARN-6592
Commit: 0ed44f25653ad2d97e2726140a7f77a555c40471
Parents: 659e85e
Author: Akira Ajisaka 
Authored: Wed Nov 22 01:07:42 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Nov 22 01:07:42 2017 +0900

--
 BUILDING.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ed44f25/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -4,7 +4,7 @@ Build instructions for Hadoop
 Requirements:
 
 * Unix System
-* JDK 1.8+
+* JDK 1.8
 * Maven 3.3 or later
 * ProtocolBuffer 2.5.0
 * CMake 3.1 or newer (if compiling native code)
@@ -344,7 +344,7 @@ Building on Windows
 Requirements:
 
 * Windows System
-* JDK 1.8+
+* JDK 1.8
 * Maven 3.0 or later
 * ProtocolBuffer 2.5.0
 * CMake 3.1 or newer


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] [abbrv] hadoop git commit: HDFS-12804. Use slf4j instead of log4j in FSEditLog. Contributed by Mukul Kumar Singh.

2017-11-28 Thread kkaranasos
HDFS-12804. Use slf4j instead of log4j in FSEditLog. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60fc2a13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60fc2a13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60fc2a13

Branch: refs/heads/YARN-6592
Commit: 60fc2a138827c2c29fa7e9d6844e3b8d43809726
Parents: 0d781dd
Author: Chen Liang 
Authored: Mon Nov 20 12:49:53 2017 -0800
Committer: Chen Liang 
Committed: Mon Nov 20 12:49:53 2017 -0800

--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 23 ++--
 .../hdfs/server/namenode/TestEditLog.java   |  4 ++--
 .../server/namenode/TestEditLogAutoroll.java| 10 -
 .../hdfs/server/namenode/TestEditLogRace.java   |  4 ++--
 .../server/namenode/ha/TestEditLogTailer.java   |  8 +++
 5 files changed, 24 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60fc2a13/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 7ca63f8..72e00ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -29,8 +29,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -114,6 +112,8 @@ import org.apache.hadoop.security.token.delegation.DelegationKey;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * FSEditLog maintains a log of the namespace modifications.
@@ -122,9 +122,7 @@ import com.google.common.collect.Lists;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FSEditLog implements LogsPurgeable {
-
-  public static final Log LOG = LogFactory.getLog(FSEditLog.class);
-
+  public static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class);
   /**
* State machine for edit log.
* 
@@ -329,7 +327,8 @@ public class FSEditLog implements LogsPurgeable {
   String error = String.format("Cannot start writing at txid %s " +
 "when there is a stream available for read: %s",
 segmentTxId, streams.get(0));
-  IOUtils.cleanup(LOG, streams.toArray(new EditLogInputStream[0]));
+  IOUtils.cleanupWithLogger(LOG,
+  streams.toArray(new EditLogInputStream[0]));
   throw new IllegalStateException(error);
 }
 
@@ -689,9 +688,9 @@ public class FSEditLog implements LogsPurgeable {
 "Could not sync enough journals to persistent storage " +
 "due to " + e.getMessage() + ". " +
 "Unsynced transactions: " + (txid - synctxid);
-LOG.fatal(msg, new Exception());
+LOG.error(msg, new Exception());
 synchronized(journalSetLock) {
-  IOUtils.cleanup(LOG, journalSet);
+  IOUtils.cleanupWithLogger(LOG, journalSet);
 }
 terminate(1, msg);
   }
@@ -715,9 +714,9 @@ public class FSEditLog implements LogsPurgeable {
   final String msg =
   "Could not sync enough journals to persistent storage. "
   + "Unsynced transactions: " + (txid - synctxid);
-  LOG.fatal(msg, new Exception());
+  LOG.error(msg, new Exception());
   synchronized(journalSetLock) {
-IOUtils.cleanup(LOG, journalSet);
+IOUtils.cleanupWithLogger(LOG, journalSet);
   }
   terminate(1, msg);
 }
@@ -772,7 +771,7 @@ public class FSEditLog implements LogsPurgeable {
 buf.append(editLogStream.getNumSync());
 buf.append(" SyncTimes(ms): ");
 buf.append(journalSet.getSyncTimes());
-LOG.info(buf);
+LOG.info(buf.toString());
   }
 
   /** Record the RPC IDs if necessary */
@@ -1711,7 +1710,7 @@ public class FSEditLog implements LogsPurgeable {
   if (recovery != null) {
 // If recovery mode is enabled, continue loading even if we know we
 // can't load up to toAtLeastTxId.
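
The substitutions above follow one mechanical recipe; a minimal self-contained sketch (the class name is illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jMigrationSketch {
  // Before: Log LOG = LogFactory.getLog(...). After:
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jMigrationSketch.class);

  public static void main(String[] args) {
    StringBuilder buf = new StringBuilder("totalSyncs: 42");
    // slf4j methods take Strings, not Objects, hence LOG.info(buf.toString()).
    LOG.info(buf.toString());
    // slf4j has no fatal(); the commit downgrades those calls to error().
    LOG.error("Could not sync enough journals", new Exception());
  }
}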

[06/50] [abbrv] hadoop git commit: HADOOP-15024 Support user agent configuration and include that & Hadoop version information to oss server. Contributed by Sammi Chen.

2017-11-28 Thread kkaranasos
HADOOP-15024 Support user agent configuration and include that & Hadoop version information to oss server.
Contributed by Sammi Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c326fc89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c326fc89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c326fc89

Branch: refs/heads/YARN-6592
Commit: c326fc89b06a8fe0978306378ba217748c7f2054
Parents: 9fb4eff
Author: Steve Loughran 
Authored: Mon Nov 20 18:56:42 2017 +
Committer: Steve Loughran 
Committed: Mon Nov 20 18:56:42 2017 +

--
 .../apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java | 4 
 .../main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java  | 7 +++
 .../src/site/markdown/tools/hadoop-aliyun/index.md| 2 +-
 3 files changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c326fc89/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index 2e8edc7..a7f13c0 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.util.VersionInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -101,6 +102,9 @@ public class AliyunOSSFileSystemStore {
 ESTABLISH_TIMEOUT_DEFAULT));
 clientConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT_KEY,
 SOCKET_TIMEOUT_DEFAULT));
+clientConf.setUserAgent(
+conf.get(USER_AGENT_PREFIX, USER_AGENT_PREFIX_DEFAULT) + ", Hadoop/"
++ VersionInfo.getVersion());
 
 String proxyHost = conf.getTrimmed(PROXY_HOST_KEY, "");
 int proxyPort = conf.getInt(PROXY_PORT_KEY, -1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c326fc89/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java
--
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java
index 04a2ccd..baa171f 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.fs.aliyun.oss;
 
+import com.aliyun.oss.common.utils.VersionInfoUtils;
+
 /**
  * ALL configuration constants for OSS filesystem.
  */
@@ -26,6 +28,11 @@ public final class Constants {
   private Constants() {
   }
 
+  // User agent
+  public static final String USER_AGENT_PREFIX = "fs.oss.user.agent.prefix";
+  public static final String USER_AGENT_PREFIX_DEFAULT =
+  VersionInfoUtils.getDefaultUserAgent();
+
   // Class of credential provider
   public static final String ALIYUN_OSS_CREDENTIALS_PROVIDER_KEY =
   "fs.oss.credentials.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c326fc89/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
--
diff --git a/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md b/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
index 2913279..9f24ce6 100644
--- a/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
+++ b/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
@@ -274,7 +274,7 @@ XInclude inclusion. Here is an example of `contract-test-options.xml`:
 
   <property>
     <name>fs.oss.impl</name>
-    <value>org.apache.hadoop.fs.aliyun.AliyunOSSFileSystem</value>
+    <value>org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem</value>
   </property>
 
   <property>

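A short sketch of using the new knob from client code (the prefix value is hypothetical; the configuration key and the resulting header format come from the patch above):

import org.apache.hadoop.conf.Configuration;

public class OssUserAgentExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key added by this patch; the default is the OSS SDK's own user agent.
    conf.set("fs.oss.user.agent.prefix", "MyIngestJob/2.3");
    // The store then sends "MyIngestJob/2.3, Hadoop/<version>" as its
    // User-Agent header on requests to the OSS endpoint.
    System.out.println(conf.get("fs.oss.user.agent.prefix"));
  }
}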

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: YARN-7527. Over-allocate node resource in async-scheduling mode of CapacityScheduler. (Tao Yang via wangda)

2017-11-28 Thread kkaranasos
YARN-7527. Over-allocate node resource in async-scheduling mode of CapacityScheduler. (Tao Yang via wangda)

Change-Id: I51ae6c2ab7a3d1febdd7d8d0519b63a13295ac7d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d781dd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d781dd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d781dd0

Branch: refs/heads/YARN-6592
Commit: 0d781dd03b979d65de94978071b2faa55005b34a
Parents: c326fc8
Author: Wangda Tan 
Authored: Mon Nov 20 11:48:15 2017 -0800
Committer: Wangda Tan 
Committed: Mon Nov 20 11:48:15 2017 -0800

--
 .../scheduler/common/fica/FiCaSchedulerApp.java |  4 +-
 .../TestCapacitySchedulerAsyncScheduling.java   | 71 
 2 files changed, 74 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d781dd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 40405fc..e9bee14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -417,7 +417,9 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
 
   // Common part of check container allocation regardless if it is a
   // increase container or regular container
-  commonCheckContainerAllocation(allocation, schedulerContainer);
+  if (!commonCheckContainerAllocation(allocation, schedulerContainer)) 
{
+return false;
+  }
 } else {
   // Container reserved first time will be NEW, after the container
   // accepted & confirmed, it will become RESERVED state

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d781dd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index 0c3130d..77596e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -405,6 +405,77 @@ public class TestCapacitySchedulerAsyncScheduling {
 rm.stop();
   }
 
+  @Test (timeout = 3)
+  public void testNodeResourceOverAllocated()
+  throws Exception {
+// disable async-scheduling for simulating complex scene
+Configuration disableAsyncConf = new Configuration(conf);
+disableAsyncConf.setBoolean(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false);
+
+// init RM & NMs & Nodes
+final MockRM rm = new MockRM(disableAsyncConf);
+rm.start();
+final MockNM nm1 = rm.registerNode("h1:1234", 9 * GB);
+final MockNM nm2 = rm.registerNode("h2:1234", 9 * GB);
+List nmLst = new ArrayList<>();
+nmLst.add(nm1);
+nmLst.add(nm2);
+
+// init scheduler & nodes
+while (
+((CapacityScheduler) rm.getRMContext().getScheduler()).getNodeTracker()
+.nodeCount() < 2) {
+  Thread.sleep(10);
+}
+Assert.assertEquals(2,
+((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+  

[21/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java
new file mode 100644
index 000..b446f22
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit.staging;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitterFactory;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;
+
+/**
+ * Factory for the {@link PartitionedStagingCommitter}.
+ */
+public class PartitionedStagingCommitterFactory
+extends AbstractS3ACommitterFactory {
+
+  /**
+   * Name of this class: {@value}.
+   */
+  public static final String CLASSNAME
+  = "org.apache.hadoop.fs.s3a.commit.staging"
+  + ".PartitionedStagingCommitterFactory";
+
+  public PathOutputCommitter createTaskCommitter(S3AFileSystem fileSystem,
+  Path outputPath,
+  TaskAttemptContext context) throws IOException {
+return new PartitionedStagingCommitter(outputPath, context);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
new file mode 100644
index 000..a4d39d7
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit.staging;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.UncheckedExecutionException;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsDirectoryException;
+import org.apache.hadoop.fs.s3a.Constants;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+import static 

[01/50] [abbrv] hadoop git commit: YARN-7525. Incorrect query parameters in cluster nodes REST API document. Contributed by Tao Yang. [Forced Update!]

2017-11-28 Thread kkaranasos
Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 b9f0e942f -> 2d5d3f127 (forced update)


YARN-7525. Incorrect query parameters in cluster nodes REST API document. 
Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96e6a993
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96e6a993
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96e6a993

Branch: refs/heads/YARN-6592
Commit: 96e6a993609d170b4d2c70b2dba8b4530093606f
Parents: d5f6688
Author: bibinchundatt 
Authored: Sat Nov 18 19:02:11 2017 +0530
Committer: bibinchundatt 
Committed: Sat Nov 18 19:02:11 2017 +0530

--
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md   | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96e6a993/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index f8048a8..f478403 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -2107,8 +2107,7 @@ With the Nodes API, you can obtain a collection of resources, each of which repr
 
 ### Query Parameters Supported
 
-  * state - the state of the node
-  * healthy - true or false
+  * states - the states of the node, specified as a comma-separated list, valid values are: NEW, RUNNING, UNHEALTHY, DECOMMISSIONING, DECOMMISSIONED, LOST, REBOOTED, SHUTDOWN
 
 ### Elements of the *nodes* object
 
@@ -2286,7 +2285,7 @@ Use the following URI to obtain a Node Object, from a node identified by the nod
 | Item | Data Type | Description |
 |: |: |: |
 | rack | string | The rack location of this node |
-| state | string | State of the node - valid values are: NEW, RUNNING, UNHEALTHY, DECOMMISSIONING, DECOMMISSIONED, LOST, REBOOTED |
+| state | string | State of the node - valid values are: NEW, RUNNING, UNHEALTHY, DECOMMISSIONING, DECOMMISSIONED, LOST, REBOOTED, SHUTDOWN |
 | id | string | The node id |
 | nodeHostName | string | The host name of the node |
 | nodeHTTPAddress | string | The nodes HTTP address |
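
To make the corrected parameter concrete, a small client sketch (the RM host and port are placeholders; `states` is the query parameter documented above):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class NodesApiExample {
  public static void main(String[] args) throws Exception {
    // Comma-separated node-states filter, per the corrected documentation.
    URL url = new URL(
        "http://rm-host:8088/ws/v1/cluster/nodes?states=RUNNING,SHUTDOWN");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      in.lines().forEach(System.out::println);  // JSON array of node objects
    }
  }
}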


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/50] [abbrv] hadoop git commit: MAPREDUCE-7011. TestClientDistributedCacheManager::testDetermineCacheVisibilities assumes all parent dirs set other exec

2017-11-28 Thread kkaranasos
MAPREDUCE-7011. TestClientDistributedCacheManager::testDetermineCacheVisibilities assumes all parent dirs set other exec


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/782ba3bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/782ba3bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/782ba3bf

Branch: refs/heads/YARN-6592
Commit: 782ba3bf9da52699b27405a3f147464975d1df99
Parents: 03c311e
Author: Chris Douglas 
Authored: Tue Nov 21 20:42:28 2017 -0800
Committer: Chris Douglas 
Committed: Tue Nov 21 20:42:28 2017 -0800

--
 .../TestClientDistributedCacheManager.java  | 28 +---
 1 file changed, 18 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/782ba3bf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
index b427f39..a61e938 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
@@ -34,12 +34,15 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+
 import org.junit.After;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,15 +100,15 @@ public class TestClientDistributedCacheManager {
 FileStatus firstStatus = statCache.get(firstCacheFile.toUri());
 FileStatus secondStatus = statCache.get(secondCacheFile.toUri());
 
-Assert.assertNotNull(firstCacheFile + " was not found in the stats cache",
+assertNotNull(firstCacheFile + " was not found in the stats cache",
 firstStatus);
-Assert.assertNotNull(secondCacheFile + " was not found in the stats cache",
+assertNotNull(secondCacheFile + " was not found in the stats cache",
 secondStatus);
-Assert.assertEquals("Missing/extra entries found in the stas cache",
+assertEquals("Missing/extra entries found in the stats cache",
 2, statCache.size());
 String expected = firstStatus.getModificationTime() + ","
 + secondStatus.getModificationTime();
-Assert.assertEquals(expected, 
jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
+assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
 
 job = Job.getInstance(conf);
 job.addCacheFile(new Path(TEST_VISIBILITY_CHILD_DIR, "*").toUri());
@@ -115,12 +118,12 @@ public class TestClientDistributedCacheManager {
 
 FileStatus thirdStatus = statCache.get(TEST_VISIBILITY_CHILD_DIR.toUri());
 
-Assert.assertEquals("Missing/extra entries found in the stas cache",
+assertEquals("Missing/extra entries found in the stats cache",
 1, statCache.size());
-Assert.assertNotNull(TEST_VISIBILITY_CHILD_DIR
+assertNotNull(TEST_VISIBILITY_CHILD_DIR
 + " was not found in the stats cache", thirdStatus);
 expected = Long.toString(thirdStatus.getModificationTime());
-Assert.assertEquals("Incorrect timestamp for " + TEST_VISIBILITY_CHILD_DIR,
+assertEquals("Incorrect timestamp for " + TEST_VISIBILITY_CHILD_DIR,
 expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
   }
   
@@ -141,6 +144,11 @@ public class TestClientDistributedCacheManager {
 job.addCacheFile(relativePath.toUri());
 jobConf = job.getConfiguration();
 
+// skip test if scratch dir is not PUBLIC
+assumeTrue(TEST_VISIBILITY_PARENT_DIR + " is not public",
+ClientDistributedCacheManager.isPublic(
+jobConf, TEST_VISIBILITY_PARENT_DIR.toUri(), statCache));
+
 ClientDistributedCacheManager.determineCacheVisibilities(jobConf,
 statCache);
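
The skip above relies on JUnit's Assume: a failed assumption marks the test
as skipped instead of failed, which is exactly what the patch wants when the
scratch directory is not world-readable. A self-contained sketch of that
mechanism (the precondition probe here is invented for illustration):

    import static org.junit.Assume.assumeTrue;

    import org.junit.Test;

    public class AssumeSketch {
      @Test
      public void runsOnlyWhenPreconditionHolds() {
        boolean dirIsPublic = probeScratchDir();  // hypothetical probe
        // If false, JUnit reports the test as skipped, not failed.
        assumeTrue("scratch dir is not public", dirIsPublic);
        // ... the real assertions would run only past this point ...
      }

      private boolean probeScratchDir() {
        return false;  // stand-in for a real permission check
      }
    }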
   

[02/50] [abbrv] hadoop git commit: YARN-7489. ConcurrentModificationException in RMAppImpl#getRMAppMetrics. Contributed by Tao Yang.

2017-11-28 Thread kkaranasos
YARN-7489. ConcurrentModificationException in RMAppImpl#getRMAppMetrics. 
Contributed by Tao Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5b81a4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5b81a4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5b81a4f

Branch: refs/heads/YARN-6592
Commit: b5b81a4f086126974c75dab9b54aea20a0c1a48f
Parents: 96e6a99
Author: bibinchundatt 
Authored: Sat Nov 18 19:25:29 2017 +0530
Committer: bibinchundatt 
Committed: Sat Nov 18 19:25:29 2017 +0530

--
 .../server/resourcemanager/rmapp/RMAppImpl.java | 58 +++-
 1 file changed, 31 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5b81a4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 85d355f..6896254 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -1641,35 +1641,39 @@ public class RMAppImpl implements RMApp, Recoverable {
 int numNonAMContainerPreempted = 0;
 Map<String, Long> resourceSecondsMap = new HashMap<>();
 Map<String, Long> preemptedSecondsMap = new HashMap<>();
-
-for (RMAppAttempt attempt : attempts.values()) {
-  if (null != attempt) {
-RMAppAttemptMetrics attemptMetrics =
-attempt.getRMAppAttemptMetrics();
-Resources.addTo(resourcePreempted,
-attemptMetrics.getResourcePreempted());
-numAMContainerPreempted += attemptMetrics.getIsPreempted() ? 1 : 0;
-numNonAMContainerPreempted +=
-attemptMetrics.getNumNonAMContainersPreempted();
-// getAggregateAppResourceUsage() will calculate resource usage stats
-// for both running and finished containers.
-AggregateAppResourceUsage resUsage =
-attempt.getRMAppAttemptMetrics().getAggregateAppResourceUsage();
-for (Map.Entry<String, Long> entry : resUsage
-.getResourceUsageSecondsMap().entrySet()) {
-  long value = RMServerUtils
-  .getOrDefault(resourceSecondsMap, entry.getKey(), 0L);
-  value += entry.getValue();
-  resourceSecondsMap.put(entry.getKey(), value);
-}
-for (Map.Entry<String, Long> entry : attemptMetrics
-.getPreemptedResourceSecondsMap().entrySet()) {
-  long value = RMServerUtils
-  .getOrDefault(preemptedSecondsMap, entry.getKey(), 0L);
-  value += entry.getValue();
-  preemptedSecondsMap.put(entry.getKey(), value);
+this.readLock.lock();
+try {
+  for (RMAppAttempt attempt : attempts.values()) {
+if (null != attempt) {
+  RMAppAttemptMetrics attemptMetrics =
+  attempt.getRMAppAttemptMetrics();
+  Resources.addTo(resourcePreempted,
+  attemptMetrics.getResourcePreempted());
+  numAMContainerPreempted += attemptMetrics.getIsPreempted() ? 1 : 0;
+  numNonAMContainerPreempted +=
+  attemptMetrics.getNumNonAMContainersPreempted();
+  // getAggregateAppResourceUsage() will calculate resource usage stats
+  // for both running and finished containers.
+  AggregateAppResourceUsage resUsage =
+  attempt.getRMAppAttemptMetrics().getAggregateAppResourceUsage();
+  for (Map.Entry<String, Long> entry : resUsage
+  .getResourceUsageSecondsMap().entrySet()) {
+long value = RMServerUtils
+.getOrDefault(resourceSecondsMap, entry.getKey(), 0L);
+value += entry.getValue();
+resourceSecondsMap.put(entry.getKey(), value);
+  }
+  for (Map.Entry<String, Long> entry : attemptMetrics
+  .getPreemptedResourceSecondsMap().entrySet()) {
+long value = RMServerUtils
+.getOrDefault(preemptedSecondsMap, entry.getKey(), 0L);
+value += entry.getValue();
+preemptedSecondsMap.put(entry.getKey(), 
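
The fix wraps the iteration over the attempt map in the application's read
lock, so a state transition holding the write lock can no longer mutate the
map mid-iteration. A stand-alone sketch of the same pattern (class and field
names invented):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class MetricsHolder {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final Map<String, Long> secondsByResource = new HashMap<>();

      public long totalSeconds() {
        lock.readLock().lock();  // block writers while we iterate
        try {
          long total = 0;
          for (long v : secondsByResource.values()) {
            total += v;
          }
          return total;
        } finally {
          lock.readLock().unlock();  // always release, even on exception
        }
      }

      public void record(String resource, long seconds) {
        lock.writeLock().lock();
        try {
          secondsByResource.merge(resource, seconds, Long::sum);
        } finally {
          lock.writeLock().unlock();
        }
      }
    }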

[15/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
new file mode 100644
index 000..2c348f5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
@@ -0,0 +1,696 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit.staging;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
+import com.google.common.collect.Sets;
+import org.hamcrest.core.StringStartsWith;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.AWSClientIOException;
+import org.apache.hadoop.fs.s3a.MockS3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.commit.files.PendingSet;
+import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.task.JobContextImpl;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.*;
+import static 
org.apache.hadoop.fs.s3a.commit.staging.StagingCommitterConstants.*;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.commit.staging.Paths.*;
+import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.*;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * The main unit test suite of the staging committer.
+ * Parameterized on thread count and unique filename policy.
+ */
+@RunWith(Parameterized.class)
+public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
+
+  private static final JobID JOB_ID = new JobID("job", 1);
+  private static final TaskAttemptID AID = new TaskAttemptID(
+  new TaskID(JOB_ID, TaskType.REDUCE, 2), 3);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStagingCommitter.class);
+
+  private final int numThreads;
+  private final boolean uniqueFilenames;
+  private JobContext job = null;
+  private TaskAttemptContext tac = null;
+  private Configuration conf = null;
+  private MockedStagingCommitter jobCommitter = null;
+  private MockedStagingCommitter committer = null;
+
+  // created in Before
+  private S3AFileSystem mockFS = null;
+  private MockS3AFileSystem wrapperFS = null;
+
+  // created in Before
+  private StagingTestBase.ClientResults results = null;
+  private StagingTestBase.ClientErrors errors = null;
+  private AmazonS3 
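
The suite is driven by JUnit's Parameterized runner, so every test method is
executed once per (thread count, unique-filename) combination. A minimal
sketch of that mechanism, with illustrative values rather than the suite's
real parameter set:

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class ParameterizedSketch {

      @Parameterized.Parameters(name = "threads-{0}-unique-{1}")
      public static Collection<Object[]> params() {
        return Arrays.asList(new Object[][] {
            {0, false},
            {1, true},
            {3, true},
        });
      }

      private final int numThreads;
      private final boolean uniqueFilenames;

      public ParameterizedSketch(int numThreads, boolean uniqueFilenames) {
        this.numThreads = numThreads;
        this.uniqueFilenames = uniqueFilenames;
      }

      @Test
      public void runsOncePerParameterRow() {
        // each row above yields a fresh instance and a separate test run
      }
    }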

[14/50] [abbrv] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread kkaranasos
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
index 230dbad..02236eb 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -26,6 +26,7 @@ import com.amazonaws.event.ProgressEvent;
 import com.amazonaws.event.ProgressEventType;
 import com.amazonaws.event.ProgressListener;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.s3a.S3ATestUtils;
 import org.junit.FixMethodOrder;
 import org.junit.Test;
 import org.junit.runners.MethodSorters;
@@ -70,16 +71,26 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 
   private int uploadBlockSize = DEFAULT_UPLOAD_BLOCKSIZE;
   private int partitionSize;
+  private long filesize;
 
   @Override
   public void setup() throws Exception {
 super.setup();
-final Path testPath = getTestPath();
-scaleTestDir = new Path(testPath, "scale");
+scaleTestDir = new Path(getTestPath(), getTestSuiteName());
 hugefile = new Path(scaleTestDir, "hugefile");
 hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+filesize = getTestPropertyBytes(getConf(), KEY_HUGE_FILESIZE,
+DEFAULT_HUGE_FILESIZE);
   }
 
+  /**
+   * Get the name of this test suite, which is used in path generation.
+   * Base implementation uses {@link #getBlockOutputBufferName()} for this.
+   * @return the name of the suite.
+   */
+  public String getTestSuiteName() {
+return getBlockOutputBufferName();
+  }
 
   /**
* Note that this can get called before test setup.
@@ -88,7 +99,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
   @Override
   protected Configuration createScaleConfiguration() {
 Configuration conf = super.createScaleConfiguration();
-partitionSize = (int)getTestPropertyBytes(conf,
+partitionSize = (int) getTestPropertyBytes(conf,
 KEY_HUGE_PARTITION_SIZE,
 DEFAULT_PARTITION_SIZE);
 assertTrue("Partition size too small: " + partitionSize,
@@ -99,6 +110,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 conf.setInt(MULTIPART_SIZE, partitionSize);
 conf.set(USER_AGENT_PREFIX, "STestS3AHugeFileCreate");
 conf.set(FAST_UPLOAD_BUFFER, getBlockOutputBufferName());
+S3ATestUtils.disableFilesystemCaching(conf);
 return conf;
   }
 
@@ -111,17 +123,16 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
   @Test
   public void test_010_CreateHugeFile() throws IOException {
 assertFalse("Please run this test sequentially to avoid timeouts" +
-" and bandwidth problems", isParallelExecution());
-long filesize = getTestPropertyBytes(getConf(), KEY_HUGE_FILESIZE,
-DEFAULT_HUGE_FILESIZE);
+" and bandwidth problems", isParallelExecution());
 long filesizeMB = filesize / _1MB;
 
 // clean up from any previous attempts
 deleteHugeFile();
 
+Path fileToCreate = getPathOfFileToCreate();
 describe("Creating file %s of size %d MB" +
 " with partition size %d buffered by %s",
-hugefile, filesizeMB, partitionSize, getBlockOutputBufferName());
+fileToCreate, filesizeMB, partitionSize, getBlockOutputBufferName());
 
 // now do a check of available upload time, with a pessimistic bandwidth
 // (that of remote upload tests). If the test times out then not only is
@@ -134,7 +145,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 assertTrue(String.format("Timeout set in %s seconds is too low;" +
 " estimating upload time of %d seconds at 1 MB/s." +
 " Rerun tests with -D%s=%d",
-timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
+timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
 uploadTime < timeout);
 assertEquals("File size set in " + KEY_HUGE_FILESIZE + " = " + filesize
 + " is not a multiple of " + uploadBlockSize,
@@ -162,7 +173,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 S3AInstrumentation.OutputStreamStatistics streamStatistics;
 long blocksPer10MB = blocksPerMB * 10;
 ProgressCallback progress = new ProgressCallback(timer);
-try (FSDataOutputStream out = fs.create(hugefile,
+try (FSDataOutputStream out = fs.create(fileToCreate,
 true,
 uploadBlockSize,
 progress)) {
@@ -219,14 +230,8 @@ public abstract class AbstractSTestS3AHugeFiles 
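
The setup change resolves KEY_HUGE_FILESIZE once via getTestPropertyBytes(),
which accepts human-readable sizes such as "256M". A rough sketch of turning
such a string into a byte count; this is an illustration, not the Hadoop
helper itself:

    public final class SizeProperty {
      private SizeProperty() {
      }

      public static long toBytes(String value) {
        String v = value.trim().toUpperCase();
        char unit = v.charAt(v.length() - 1);
        long multiplier;
        switch (unit) {
        case 'K': multiplier = 1L << 10; break;
        case 'M': multiplier = 1L << 20; break;
        case 'G': multiplier = 1L << 30; break;
        default:  return Long.parseLong(v);  // plain byte count
        }
        return Long.parseLong(v.substring(0, v.length() - 1)) * multiplier;
      }
    }

With this, e.g., toBytes("256M") yields 268435456, to which a
multiple-of-blocksize check like the assertEquals above can then be applied.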

[03/50] [abbrv] hadoop git commit: HADOOP-13514. Upgrade maven surefire plugin to 2.20.1

2017-11-28 Thread kkaranasos
HADOOP-13514. Upgrade maven surefire plugin to 2.20.1

Signed-off-by: Allen Wittenauer 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6903cf09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6903cf09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6903cf09

Branch: refs/heads/YARN-6592
Commit: 6903cf096e702c8d4827d4e72cd5638c3a3b5429
Parents: b5b81a4
Author: Akira Ajisaka 
Authored: Sun Nov 19 12:39:02 2017 -0800
Committer: Allen Wittenauer 
Committed: Sun Nov 19 12:39:37 2017 -0800

--
 BUILDING.txt  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml | 3 +++
 hadoop-project/pom.xml| 5 +++--
 hadoop-tools/hadoop-aws/pom.xml   | 2 ++
 hadoop-tools/hadoop-azure/pom.xml | 3 +++
 5 files changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6903cf09/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 47aaab4..9955563 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -331,10 +331,10 @@ If the build process fails with an out of memory error, 
you should be able to fi
 it by increasing the memory used by maven which can be done via the environment
 variable MAVEN_OPTS.
 
-Here is an example setting to allocate between 256 and 512 MB of heap space to
+Here is an example setting to allocate between 256 MB and 1 GB of heap space to
 Maven
 
-export MAVEN_OPTS="-Xms256m -Xmx512m"
+export MAVEN_OPTS="-Xms256m -Xmx1g"
 
 
--
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6903cf09/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml 
b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
index 1295b59..89b9645 100644
--- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
+++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
@@ -128,6 +128,9 @@
   integration-test
   verify
 
+<configuration>
+  <trimStackTrace>false</trimStackTrace>
+</configuration>
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6903cf09/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f99e413..c4dc1bf 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -118,7 +118,7 @@
 
 
 -Xmx2048m 
-XX:+HeapDumpOnOutOfMemoryError
-<maven-surefire-plugin.version>2.17</maven-surefire-plugin.version>
+<maven-surefire-plugin.version>2.20.1</maven-surefire-plugin.version>
 <maven-surefire-report-plugin.version>${maven-surefire-plugin.version}</maven-surefire-report-plugin.version>
 <maven-failsafe-plugin.version>${maven-surefire-plugin.version}</maven-failsafe-plugin.version>
 
@@ -1602,6 +1602,7 @@
 
${env.DYLD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib
 4
   
+  <trimStackTrace>false</trimStackTrace>
   
 
 ${project.build.directory}/log
@@ -1612,7 +1613,7 @@
 ${test.build.data}
 ${test.build.webapps}
 ${test.cache.data}
-<test.build.classes>${test.build.classes}</test.build.classes>
+<test.build.classes>${project.build.directory}/test-classes</test.build.classes>
 
 true
 
${project.build.directory}/test-classes/krb5.conf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6903cf09/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 47788cd..97ceddf 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -153,6 +153,7 @@
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
   
${fs.s3a.scale.test.timeout}
+  <trimStackTrace>false</trimStackTrace>
   
 
 true
@@ -207,6 +208,7 @@
 
 
   
${fs.s3a.scale.test.timeout}
+  <trimStackTrace>false</trimStackTrace>
   
 
 false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6903cf09/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index b479872..1728b60 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -349,6 +349,7 @@
   false
   

[27/36] hadoop git commit: MAPREDUCE-7014. Fix java doc errors in jdk1.8. Contributed by Steve Loughran.

2017-11-28 Thread aengineer
MAPREDUCE-7014. Fix java doc errors in jdk1.8. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cd75845
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cd75845
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cd75845

Branch: refs/heads/HDFS-7240
Commit: 3cd75845da1aced3d88e0ce68c68e8d95f48fb79
Parents: 2bde3ae
Author: Rohith Sharma K S 
Authored: Mon Nov 27 22:01:00 2017 +0530
Committer: Rohith Sharma K S 
Committed: Mon Nov 27 22:01:00 2017 +0530

--
 .../lib/output/PathOutputCommitterFactory.java  | 12 ++--
 .../src/main/java/org/apache/hadoop/fs/s3a/Invoker.java |  4 +++-
 .../java/org/apache/hadoop/fs/s3a/S3AFileSystem.java|  2 +-
 .../main/java/org/apache/hadoop/fs/s3a/S3AUtils.java|  4 
 .../org/apache/hadoop/fs/s3a/WriteOperationHelper.java  |  1 +
 .../hadoop/fs/s3a/commit/AbstractS3ACommitter.java  |  1 +
 .../apache/hadoop/fs/s3a/commit/CommitOperations.java   |  2 +-
 .../hadoop/fs/s3a/commit/staging/StagingCommitter.java  |  1 +
 .../hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java|  2 +-
 9 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cd75845/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
index 0df14d1..7d214f2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitterFactory.java
@@ -39,12 +39,12 @@ import org.apache.hadoop.util.ReflectionUtils;
  *
  * Algorithm:
 * <ol>
- *   <li>If an explicit committer factory is named, it is used.
- *   <li>The output path is examined.
+ *   <li>If an explicit committer factory is named, it is used.</li>
+ *   <li>The output path is examined.
 *   If is non null and there is an explicit schema for that filesystem,
- *   its factory is instantiated.
- *   <li>Otherwise, an instance of {@link FileOutputCommitter} is
- *   created.
+ *   its factory is instantiated.</li>
+ *   <li>Otherwise, an instance of {@link FileOutputCommitter} is
+ *   created.</li>
 * </ol>
  *
  * In {@link FileOutputFormat}, the created factory has its method
@@ -186,7 +186,7 @@ public class PathOutputCommitterFactory extends Configured {
   }
 
   /**
-   * Create the committer factory for a task attempt & destination, then
+   * Create the committer factory for a task attempt and destination, then
* create the committer from it.
* @param outputPath the task's output path, or or null if no output path
* has been defined.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cd75845/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
index 9900f4c..107a247 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.io.retry.RetryPolicy;
  *
  * The static {@link #quietly(String, String, VoidOperation)} and
  * {@link #quietlyEval(String, String, Operation)} calls exist to take any
- * operation and quietly catch & log at debug. The return value of
+ * operation and quietly catch and log at debug. The return value of
  * {@link #quietlyEval(String, String, Operation)} is a java 8 optional,
  * which can then be used in java8-expressions.
  */
@@ -390,9 +390,11 @@ public class Invoker {
* Execute an operation; any exception raised is caught and
* logged at debug.
* The result is only non-empty if the operation succeeded
+   * @param <T> type to return
* @param action action to execute
* @param path path (for exception construction)
* @param operation operation
+   * @return the result of a successful operation
*/
   public static <T> Optional<T> quietlyEval(String action,
       String 

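JDK 8's doclint is what rejects the bare "&" and the unterminated HTML that
these hunks clean up. A tiny example of javadoc that passes doclint (the
class itself is illustrative):

    /**
     * Doclint-friendly javadoc: escape special characters such as
     * &amp; and close every HTML element:
     * <ul>
     *   <li>rewrite "a &amp; b", or simply say "a and b"</li>
     *   <li>terminate each {@code <li>} item explicitly</li>
     * </ul>
     */
    public class DoclintFriendly {
    }
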
[29/36] hadoop git commit: YARN-7363. ContainerLocalizer don't have a valid log4j config in case of Linux container executor. (Contributed by Yufei Gu)

2017-11-28 Thread aengineer
YARN-7363. ContainerLocalizer don't have a valid log4j config in case of Linux 
container executor. (Contributed by Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8923cdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8923cdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8923cdb

Branch: refs/heads/HDFS-7240
Commit: d8923cdbf1567aee10a54f144fef734d1465ebed
Parents: fedabca
Author: Yufei Gu 
Authored: Mon Nov 27 11:47:11 2017 -0800
Committer: Yufei Gu 
Committed: Mon Nov 27 14:31:52 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 +++
 .../src/main/resources/yarn-default.xml |  8 
 .../nodemanager/LinuxContainerExecutor.java | 28 +++-
 .../WindowsSecureContainerExecutor.java |  2 +-
 .../localizer/ContainerLocalizer.java   | 46 +++-
 .../TestLinuxContainerExecutorWithMocks.java| 19 +---
 6 files changed, 98 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8923cdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ead9977..c1024ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1675,6 +1675,12 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT =
   "-Xmx256m";
 
+  /** The log level of container localizer process. */
+  public static final String NM_CONTAINER_LOCALIZER_LOG_LEVEL=
+  NM_PREFIX + "container-localizer.log.level";
+  public static final String NM_CONTAINER_LOCALIZER_LOG_LEVEL_DEFAULT =
+  "INFO";
+
   /** Prefix for runtime configuration constants. */
   public static final String LINUX_CONTAINER_RUNTIME_PREFIX = NM_PREFIX +
   "runtime.linux.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8923cdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 12cb902..dd9c6bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1165,6 +1165,14 @@
 
   
 
+  <property>
+    <description>The log level for container localizer while it is an
+    independent process.</description>
+    <name>yarn.nodemanager.container-localizer.log.level</name>
+    <value>INFO</value>
+  </property>
+
+
   Where to store container logs. An application's localized log directory
   will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
   Individual containers' log directories will be below this, in 
directories 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8923cdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index e8c46a2..eaf664f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ConfigurationException;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;

[11/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java
new file mode 100644
index 000..b6b6b97
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/Tasks.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for parallel execution, takes closures for the various
+ * actions.
+ * There is no retry logic: it is expected to be handled by the closures.
+ */
+public final class Tasks {
+  private static final Logger LOG = LoggerFactory.getLogger(Tasks.class);
+
+  private Tasks() {
+  }
+
+  /**
+   * Callback invoked to process an item.
+   * @param <I> item type being processed
+   * @param <E> exception class which may be raised
+   */
+  @FunctionalInterface
+  public interface Task<I, E extends Exception> {
+    void run(I item) throws E;
+  }
+
+  /**
+   * Callback invoked on a failure.
+   * @param <I> item type being processed
+   * @param <E> exception class which may be raised
+   */
+  @FunctionalInterface
+  public interface FailureTask<I, E extends Exception> {
+
+/**
+ * process a failure.
+ * @param item item the task is processing
+ * @param exception the exception which was raised.
+ * @throws E Exception of type E
+ */
+void run(I item, Exception exception) throws E;
+  }
+
+  /**
+   * Builder for task execution.
+   * @param <I> item type
+   */
+  public static class Builder<I> {
+    private final Iterable<I> items;
+    private ExecutorService service = null;
+    private FailureTask<I, ?> onFailure = null;
+    private boolean stopOnFailure = false;
+    private boolean suppressExceptions = false;
+    private Task<I, ?> revertTask = null;
+    private boolean stopRevertsOnFailure = false;
+    private Task<I, ?> abortTask = null;
+    private boolean stopAbortsOnFailure = false;
+
+    /**
+     * Create the builder.
+     * @param items items to process
+     */
+    Builder(Iterable<I> items) {
+      this.items = items;
+    }
+
+    /**
+     * Declare executor service: if null, the tasks are executed in a single
+     * thread.
+     * @param executorService service to schedule tasks with.
+     * @return this builder.
+     */
+    public Builder<I> executeWith(ExecutorService executorService) {
+      this.service = executorService;
+      return this;
+    }
+
+    public Builder<I> onFailure(FailureTask<I, ?> task) {
+      this.onFailure = task;
+      return this;
+    }
+
+    public Builder<I> stopOnFailure() {
+      this.stopOnFailure = true;
+      return this;
+    }
+
+    public Builder<I> suppressExceptions() {
+      return suppressExceptions(true);
+    }
+
+    public Builder<I> suppressExceptions(boolean suppress) {
+      this.suppressExceptions = suppress;
+      return this;
+    }
+
+    public Builder<I> revertWith(Task<I, ?> task) {
+      this.revertTask = task;
+      return this;
+    }
+
+    public Builder<I> stopRevertsOnFailure() {
+      this.stopRevertsOnFailure = true;
+      return this;
+    }
+
+    public Builder<I> abortWith(Task<I, ?> task) {
+      this.abortTask = task;
+      return this;
+    }
+
+    public Builder<I> stopAbortsOnFailure() {
+      this.stopAbortsOnFailure = true;
+      return this;
+    }
+
+    public <E extends Exception> boolean run(Task<I, E> task) throws E {
+      if (service != null) {
+        return runParallel(task);
+      } else {
+        return runSingleThreaded(task);
+      }
+    }
+
+    private <E extends Exception> boolean 

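A hedged usage sketch of the builder above, assuming a Tasks.foreach()
entry point that returns a Builder (that factory method is not shown in this
excerpt) and an invented item list:

    import java.util.Arrays;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class TasksUsage {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        try {
          Tasks.foreach(Arrays.asList("a", "b", "c"))  // assumed entry point
              .executeWith(pool)      // run items on the pool
              .stopOnFailure()        // first failure halts scheduling
              .run(item -> System.out.println("processing " + item));
        } finally {
          pool.shutdown();
        }
      }
    }
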
[06/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java
new file mode 100644
index 000..4d7f524
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java
@@ -0,0 +1,1371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.MapFile;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+import org.apache.hadoop.mapreduce.task.JobContextImpl;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Test the job/task commit actions of an S3A Committer, including trying to
+ * simulate some failure and retry conditions.
+ * Derived from
+ * {@code org.apache.hadoop.mapreduce.lib.output.TestFileOutputCommitter}.
+ *
+ * This is a complex test suite as it tries to explore the full lifecycle
+ * of committers, and is designed for subclassing.
+ */
+@SuppressWarnings({"unchecked", "ThrowableNotThrown", "unused"})
+public abstract class AbstractITCommitProtocol extends AbstractCommitITest {
+  private Path outDir;
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(AbstractITCommitProtocol.class);
+
+  private static final String SUB_DIR = "SUB_DIR";
+
+  protected static final String PART_0 = "part-m-0";
+
+  /**
+   * Counter to guarantee that even in parallel test runs, no job has the same
+   * ID.
+   */
+
+  private String jobId;
+
+  // A random task attempt id for testing.
+  private String attempt0;
+  private TaskAttemptID taskAttempt0;
+
+  private String attempt1;
+  private TaskAttemptID taskAttempt1;
+
+  private static final Text KEY_1 = new Text("key1");
+  private static final Text KEY_2 = new Text("key2");
+  private static final Text VAL_1 = new Text("val1");
+  private static final Text VAL_2 = new Text("val2");
+
+  /** A job to abort in test case teardown. */
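
For orientation, the happy-path call sequence these tests drive through a
committer looks roughly like the following; the contexts are assumed to be
built elsewhere (e.g. via the JobContextImpl / TaskAttemptContextImpl
imported above):

    import java.io.IOException;

    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.OutputCommitter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    public class CommitProtocolSketch {
      public static void runHappyPath(OutputCommitter committer,
          JobContext job, TaskAttemptContext task) throws IOException {
        committer.setupJob(job);        // once, before any task runs
        committer.setupTask(task);      // once per task attempt
        if (committer.needsTaskCommit(task)) {
          committer.commitTask(task);   // promote the attempt's output
        }
        committer.commitJob(job);       // make all task output visible
      }
    }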
+  

[02/36] hadoop git commit: MAPREDUCE-7011. TestClientDistributedCacheManager::testDetermineCacheVisibilities assumes all parent dirs set other exec

2017-11-28 Thread aengineer
MAPREDUCE-7011. 
TestClientDistributedCacheManager::testDetermineCacheVisibilities assumes all 
parent dirs set other exec


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/782ba3bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/782ba3bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/782ba3bf

Branch: refs/heads/HDFS-7240
Commit: 782ba3bf9da52699b27405a3f147464975d1df99
Parents: 03c311e
Author: Chris Douglas 
Authored: Tue Nov 21 20:42:28 2017 -0800
Committer: Chris Douglas 
Committed: Tue Nov 21 20:42:28 2017 -0800

--
 .../TestClientDistributedCacheManager.java  | 28 +---
 1 file changed, 18 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/782ba3bf/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
index b427f39..a61e938 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/filecache/TestClientDistributedCacheManager.java
@@ -34,12 +34,15 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+
 import org.junit.After;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,15 +100,15 @@ public class TestClientDistributedCacheManager {
 FileStatus firstStatus = statCache.get(firstCacheFile.toUri());
 FileStatus secondStatus = statCache.get(secondCacheFile.toUri());
 
-Assert.assertNotNull(firstCacheFile + " was not found in the stats cache",
+assertNotNull(firstCacheFile + " was not found in the stats cache",
 firstStatus);
-Assert.assertNotNull(secondCacheFile + " was not found in the stats cache",
+assertNotNull(secondCacheFile + " was not found in the stats cache",
 secondStatus);
-Assert.assertEquals("Missing/extra entries found in the stas cache",
+assertEquals("Missing/extra entries found in the stats cache",
 2, statCache.size());
 String expected = firstStatus.getModificationTime() + ","
 + secondStatus.getModificationTime();
-Assert.assertEquals(expected, 
jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
+assertEquals(expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
 
 job = Job.getInstance(conf);
 job.addCacheFile(new Path(TEST_VISIBILITY_CHILD_DIR, "*").toUri());
@@ -115,12 +118,12 @@ public class TestClientDistributedCacheManager {
 
 FileStatus thirdStatus = statCache.get(TEST_VISIBILITY_CHILD_DIR.toUri());
 
-Assert.assertEquals("Missing/extra entries found in the stas cache",
+assertEquals("Missing/extra entries found in the stats cache",
 1, statCache.size());
-Assert.assertNotNull(TEST_VISIBILITY_CHILD_DIR
+assertNotNull(TEST_VISIBILITY_CHILD_DIR
 + " was not found in the stats cache", thirdStatus);
 expected = Long.toString(thirdStatus.getModificationTime());
-Assert.assertEquals("Incorrect timestamp for " + TEST_VISIBILITY_CHILD_DIR,
+assertEquals("Incorrect timestamp for " + TEST_VISIBILITY_CHILD_DIR,
 expected, jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
   }
   
@@ -141,6 +144,11 @@ public class TestClientDistributedCacheManager {
 job.addCacheFile(relativePath.toUri());
 jobConf = job.getConfiguration();
 
+// skip test if scratch dir is not PUBLIC
+assumeTrue(TEST_VISIBILITY_PARENT_DIR + " is not public",
+ClientDistributedCacheManager.isPublic(
+jobConf, TEST_VISIBILITY_PARENT_DIR.toUri(), statCache));
+
 ClientDistributedCacheManager.determineCacheVisibilities(jobConf,
 statCache);
   

[18/36] hadoop git commit: HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. Contributed by Lei (Eddy) Xu.

2017-11-28 Thread aengineer
HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/785732c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/785732c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/785732c1

Branch: refs/heads/HDFS-7240
Commit: 785732c13e2ebe9f27350b6be82eb2fb782d7dc4
Parents: d42a336
Author: Lei Xu 
Authored: Wed Nov 22 10:19:58 2017 -0800
Committer: Lei Xu 
Committed: Wed Nov 22 10:22:32 2017 -0800

--
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6293 -> 6753 bytes
 .../src/test/resources/editsStored.xml  | 750 +++
 2 files changed, 423 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/785732c1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index 8029575..3f2817a 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/785732c1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 0a1c25e..2a57c73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -13,8 +13,8 @@
   2
   
 1
-1423097579620
-ef3f2032e2797e8e
+1512000829976
+e7457bcc6ab95a84
   
 
   
@@ -24,8 +24,8 @@
   3
   
 2
-1423097579622
-b978ed731a0b4a65
+1512000829980
+07cc38caf6c47bb4
   
 
   
@@ -37,19 +37,19 @@
   16386
   /file_create
   1
-  1422406380345
-  1422406380345
+  1511309632199
+  1511309632199
   512
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   true
   
-xyao
+lei
 supergroup
 420
   
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  6
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  5
 
   
   
@@ -60,14 +60,14 @@
   0
   /file_create
   1
-  1422406380369
-  1422406380345
+  1511309632248
+  1511309632199
   512
   
   
   false
   
-xyao
+lei
 supergroup
 420
   
@@ -78,11 +78,11 @@
 
   6
   /file_create
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   false
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  8
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  7
 
   
   
@@ -93,23 +93,118 @@
   0
   /file_create
   1
-  1422406380376
-  1422406380345
+  1511309632263
+  1511309632199
   512
   
   
   false
   
-xyao
+lei
 supergroup
 420
   
 
   
   
-OP_SET_STORAGE_POLICY
+OP_ADD
 
   8
+  0
+  16387
+  /update_blocks
+  1
+  1511309632266
+  1511309632266
+  4096
+  DFSClient_NONMAPREDUCE_2134933941_1
+  127.0.0.1
+  true
+  
+lei
+supergroup
+420
+  
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  9
+
+  
+  
+OP_ALLOCATE_BLOCK_ID
+
+  9
+  1073741825
+
+  
+  
+OP_SET_GENSTAMP_V2
+
+  10
+  1001
+
+  
+  
+OP_ADD_BLOCK
+
+  11
+  /update_blocks
+  
+1073741825
+0
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  12
+  /update_blocks
+  
+1073741825
+1
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  13
+  /update_blocks
+  
+  -2
+
+  
+  
+OP_CLOSE
+
+  14
+  0
+  0
+  /update_blocks
+  1
+  1511309632454
+  1511309632266
+  4096
+  
+  
+  false
+  
+lei
+supergroup
+420
+  
+
+  
+  
+OP_SET_STORAGE_POLICY
+
+  15
   /file_create
   7
 
@@ -117,36 +212,36 @@
   
 OP_RENAME_OLD
 
-  9
+  16
   0
   /file_create
   /file_moved
-  1422406380383
-  

[28/36] hadoop git commit: YARN-6168. Restarted RM may not inform AM about all existing containers. Contributed by Chandni Singh

2017-11-28 Thread aengineer
YARN-6168. Restarted RM may not inform AM about all existing containers. 
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fedabcad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fedabcad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fedabcad

Branch: refs/heads/HDFS-7240
Commit: fedabcad42067ac7dd24de40fab6be2d3485a540
Parents: 3cd7584
Author: Jian He 
Authored: Mon Nov 27 09:55:08 2017 -0800
Committer: Jian He 
Committed: Mon Nov 27 10:19:58 2017 -0800

--
 .../api/protocolrecords/AllocateResponse.java   |  54 +++
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../impl/pb/AllocateResponsePBImpl.java |  37 +
 .../resourcemanager/DefaultAMSProcessor.java|   3 +
 .../scheduler/AbstractYarnScheduler.java|   4 +-
 .../resourcemanager/scheduler/Allocation.java   |  13 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  48 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |   5 +-
 .../scheduler/fair/FairScheduler.java   |   3 +-
 .../applicationsmanager/TestAMRestart.java  | 149 +++
 10 files changed, 310 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fedabcad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index 9b254ae..98346ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -372,6 +372,44 @@ public abstract class AllocateResponse {
  public void setUpdateErrors(List<UpdateContainerError> updateErrors) {
   }
 
+  /**
+   * Get the list of running containers as viewed by
+   * ResourceManager from previous application attempts which
+   * have not been reported to the Application Master yet.
+   * 
+   * These containers were recovered by the RM after the application master
+   * had already registered. This may happen after RM restart when some NMs get
+   * delayed in connecting to the RM and reporting the active containers.
+   * Since they were not reported in the registration
+   * response, they are reported in the response to the AM heartbeat.
+   *
+   * @return the list of running containers as viewed by
+   * ResourceManager from previous application attempts.
+   */
+  @Public
+  @Unstable
+  public abstract List<Container> getContainersFromPreviousAttempts();
+
+  /**
+   * Set the list of running containers as viewed by
+   * ResourceManager from previous application attempts which have
+   * not been reported to the Application Master yet.
+   * 
+   * These containers were recovered by the RM after the application master
+   * had already registered. This may happen after RM restart when some NMs get
+   * delayed in connecting to the RM and reporting the active containers.
+   * Since they were not reported in the registration
+   * response, they are reported in the response to the AM heartbeat.
+   *
+   * @param containersFromPreviousAttempt
+   *  the list of running containers as viewed by
+   *  ResourceManager from previous application attempts.
+   */
+  @Private
+  @Unstable
+  public abstract void setContainersFromPreviousAttempts(
+      List<Container> containersFromPreviousAttempt);
+
   @Private
   @Unstable
   public static AllocateResponseBuilder newBuilder() {
@@ -590,6 +628,22 @@ public abstract class AllocateResponse {
 }
 
 /**
+ * Set the containersFromPreviousAttempt of the response.
+ * @see AllocateResponse#setContainersFromPreviousAttempts(List)
+ * @param containersFromPreviousAttempt
+ * containersFromPreviousAttempt of the response
+ * @return {@link AllocateResponseBuilder}
+ */
+@Private
+@Unstable
+public AllocateResponseBuilder containersFromPreviousAttempt(
+        List<Container> containersFromPreviousAttempt) {
+  allocateResponse.setContainersFromPreviousAttempts(
+  containersFromPreviousAttempt);
+  return this;
+}
+
+/**
  * Return generated {@link AllocateResponse} object.
  * @return {@link AllocateResponse}
  */
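
On the AM side, the new field is consumed from the heartbeat response. A
hedged sketch (the handler class is invented; the getter is the one added by
this patch):

    import java.util.List;

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.records.Container;

    public class PreviousAttemptContainers {
      // called once per AM heartbeat
      public static void handle(AllocateResponse response) {
        List<Container> recovered =
            response.getContainersFromPreviousAttempts();
        for (Container container : recovered) {
          // re-adopt containers that survived the RM restart
          System.out.println("re-adopting " + container.getId());
        }
      }
    }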


[23/36] hadoop git commit: YARN-6483. Add nodes transitioning to DECOMMISSIONING state to the list of updated nodes returned to the AM. (Juan Rodriguez Hortala via asuresh)

2017-11-28 Thread aengineer
YARN-6483. Add nodes transitioning to DECOMMISSIONING state to the list of 
updated nodes returned to the AM. (Juan Rodriguez Hortala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b46ca7e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b46ca7e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b46ca7e7

Branch: refs/heads/HDFS-7240
Commit: b46ca7e73b8bac3fdbff0b13afe009308078acf2
Parents: aab4395
Author: Arun Suresh 
Authored: Wed Nov 22 19:16:44 2017 -0800
Committer: Arun Suresh 
Committed: Wed Nov 22 19:18:30 2017 -0800

--
 .../hadoop/yarn/api/records/NodeReport.java |  47 ++--
 .../hadoop/yarn/api/records/NodeUpdateType.java |  29 +
 .../src/main/proto/yarn_protos.proto|   8 ++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  14 +--
 .../hadoop/yarn/client/cli/TestYarnCLI.java |   2 +-
 .../api/records/impl/pb/NodeReportPBImpl.java   |  50 +++-
 .../yarn/api/records/impl/pb/ProtoUtils.java|  12 ++
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  14 ++-
 .../server/resourcemanager/ClientRMService.java |   5 +-
 .../DecommissioningNodesWatcher.java|  38 +-
 .../resourcemanager/DefaultAMSProcessor.java|  12 +-
 .../resourcemanager/NodesListManager.java   |  78 +
 .../NodesListManagerEventType.java  |   3 +-
 .../server/resourcemanager/rmapp/RMApp.java |  10 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  11 +-
 .../rmapp/RMAppNodeUpdateEvent.java |   9 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   2 +-
 .../resourcemanager/rmnode/RMNodeImpl.java  |   5 +
 .../yarn/server/resourcemanager/MockRM.java |  15 +++
 .../resourcemanager/TestClientRMService.java|  50 
 .../TestDecommissioningNodesWatcher.java|   4 +-
 .../resourcemanager/TestRMNodeTransitions.java  |  13 ++-
 .../TestResourceTrackerService.java | 116 ++-
 .../applicationsmanager/MockAsm.java|   4 +-
 .../TestAMRMRPCNodeUpdates.java |  51 
 .../server/resourcemanager/rmapp/MockRMApp.java |   4 +-
 26 files changed, 495 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46ca7e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
index 885a3b4..3a80641 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
@@ -53,7 +53,8 @@ public abstract class NodeReport {
   String httpAddress, String rackName, Resource used, Resource capability,
   int numContainers, String healthReport, long lastHealthReportTime) {
 return newInstance(nodeId, nodeState, httpAddress, rackName, used,
-capability, numContainers, healthReport, lastHealthReportTime, null);
+capability, numContainers, healthReport, lastHealthReportTime,
+null, null, null);
   }
 
   @Private
@@ -61,7 +62,8 @@ public abstract class NodeReport {
   public static NodeReport newInstance(NodeId nodeId, NodeState nodeState,
   String httpAddress, String rackName, Resource used, Resource capability,
   int numContainers, String healthReport, long lastHealthReportTime,
-  Set<String> nodeLabels) {
+  Set<String> nodeLabels, Integer decommissioningTimeout,
+  NodeUpdateType nodeUpdateType) {
 NodeReport nodeReport = Records.newRecord(NodeReport.class);
 nodeReport.setNodeId(nodeId);
 nodeReport.setNodeState(nodeState);
@@ -73,6 +75,8 @@ public abstract class NodeReport {
 nodeReport.setHealthReport(healthReport);
 nodeReport.setLastHealthReportTime(lastHealthReportTime);
 nodeReport.setNodeLabels(nodeLabels);
+nodeReport.setDecommissioningTimeout(decommissioningTimeout);
+nodeReport.setNodeUpdateType(nodeUpdateType);
 return nodeReport;
   }
 
@@ -186,8 +190,8 @@ public abstract class NodeReport {
   public abstract void setLastHealthReportTime(long lastHealthReport);
   
   /**
-   * Get labels of this node
-   * @return labels of this node
+   * Get labels of this node.
+   * @return labels of this node.
*/
   @Public
   @Stable
@@ -198,8 +202,8 @@ public abstract class NodeReport {
  public abstract void setNodeLabels(Set<String> nodeLabels);
 
   /**

[36/36] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2017-11-28 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac9cc8a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac9cc8a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac9cc8a8

Branch: refs/heads/HDFS-7240
Commit: ac9cc8a8c32a459c6a178bdadfd767ab080e76be
Parents: b72799a 30941d9
Author: Anu Engineer 
Authored: Tue Nov 28 13:17:33 2017 -0800
Committer: Anu Engineer 
Committed: Tue Nov 28 13:17:33 2017 -0800

--
 .../dev-support/findbugsExcludeFile.xml |7 +
 .../apache/hadoop/fs/FSDataOutputStream.java|9 +
 .../apache/hadoop/fs/PathExistsException.java   |4 +-
 .../org/apache/hadoop/fs/StorageStatistics.java |5 +
 .../hadoop/metrics2/source/JvmMetrics.java  |2 +-
 .../apache/hadoop/util/JsonSerialization.java   |  299 +++
 .../src/main/resources/core-default.xml |  117 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   51 +-
 .../hadoop/metrics2/source/TestJvmMetrics.java  |2 +-
 .../apache/hadoop/test/GenericTestUtils.java|   29 +-
 .../org/apache/hadoop/test/HadoopTestBase.java  |   51 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  144 +-
 .../hadoop/util/TestJsonSerialization.java  |  185 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   15 +-
 .../hadoop/hdfs/DFSClientFaultInjector.java |2 +
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   |   55 +-
 .../hdfs/client/impl/TestLeaseRenewer.java  |   23 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |6 +-
 .../src/site/markdown/HDFSCommands.md   |   23 +
 .../hadoop/hdfs/TestDFSClientRetries.java   |   80 +
 .../hadoop/hdfs/TestDistributedFileSystem.java  |5 +-
 .../tools/TestViewFSStoragePolicyCommands.java  |   37 +-
 .../hadoop-hdfs/src/test/resources/editsStored  |  Bin 6293 -> 6753 bytes
 .../src/test/resources/editsStored.xml  |  750 ---
 .../mapreduce/TestMapreduceConfigFields.java|   27 +-
 .../lib/output/BindingPathOutputCommitter.java  |  184 ++
 .../lib/output/FileOutputCommitter.java |   12 +-
 .../lib/output/FileOutputCommitterFactory.java  |   38 +
 .../mapreduce/lib/output/FileOutputFormat.java  |   10 +-
 .../lib/output/NamedCommitterFactory.java   |   79 +
 .../lib/output/PathOutputCommitter.java |   17 +
 .../lib/output/PathOutputCommitterFactory.java  |  204 ++
 .../src/main/resources/mapred-default.xml   |   22 +
 .../TestClientDistributedCacheManager.java  |   28 +-
 .../lib/output/TestPathOutputCommitter.java |   24 +-
 .../output/TestPathOutputCommitterFactory.java  |  495 +
 hadoop-tools/hadoop-aws/pom.xml |   46 +-
 .../hadoop/fs/s3a/AWSBadRequestException.java   |   42 +
 .../hadoop/fs/s3a/AWSClientIOException.java |3 +-
 .../hadoop/fs/s3a/AWSNoResponseException.java   |   31 +
 .../hadoop/fs/s3a/AWSRedirectException.java |   38 +
 .../fs/s3a/AWSServiceThrottledException.java|   42 +
 .../hadoop/fs/s3a/AWSStatus500Exception.java|   37 +
 .../s3a/BlockingThreadPoolExecutorService.java  |2 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   72 +-
 .../fs/s3a/InconsistentAmazonS3Client.java  |  232 ++-
 .../java/org/apache/hadoop/fs/s3a/Invoker.java  |  487 +
 .../java/org/apache/hadoop/fs/s3a/Listing.java  |   26 +-
 .../java/org/apache/hadoop/fs/s3a/Retries.java  |   92 +
 .../hadoop/fs/s3a/S3ABlockOutputStream.java |  307 +--
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java |2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  940 +
 .../apache/hadoop/fs/s3a/S3AInputStream.java|   56 +-
 .../hadoop/fs/s3a/S3AInstrumentation.java   |  231 ++-
 .../apache/hadoop/fs/s3a/S3ARetryPolicy.java|  246 +++
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   12 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  328 ++-
 .../org/apache/hadoop/fs/s3a/S3ListRequest.java |   11 +
 .../hadoop/fs/s3a/S3ObjectAttributes.java   |   10 +-
 .../org/apache/hadoop/fs/s3a/Statistic.java |   56 +-
 .../hadoop/fs/s3a/WriteOperationHelper.java |  475 +
 .../fs/s3a/commit/AbstractS3ACommitter.java |  757 +++
 .../s3a/commit/AbstractS3ACommitterFactory.java |   90 +
 .../hadoop/fs/s3a/commit/CommitConstants.java   |  240 +++
 .../hadoop/fs/s3a/commit/CommitOperations.java  |  596 ++
 .../hadoop/fs/s3a/commit/CommitUtils.java   |  129 ++
 .../hadoop/fs/s3a/commit/CommitUtilsWithMR.java |  192 ++
 .../apache/hadoop/fs/s3a/commit/Duration.java   |   60 +
 .../hadoop/fs/s3a/commit/DurationInfo.java  |   59 +
 .../s3a/commit/InternalCommitterConstants.java  |  100 +
 .../hadoop/fs/s3a/commit/LocalTempDir.java  |   80 +
 .../fs/s3a/commit/MagicCommitIntegration.java   |  182 ++
 .../hadoop/fs/s3a/commit/MagicCommitPaths.java  |  229 ++
 

[05/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java
new file mode 100644
index 000..47d112d
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/TestMagicCommitPaths.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+import static org.apache.hadoop.fs.s3a.commit.MagicCommitPaths.*;
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+
+/**
+ * Tests for {@link MagicCommitPaths} path operations.
+ */
+public class TestMagicCommitPaths extends Assert {
+
+  private static final List<String> MAGIC_AT_ROOT =
+  list(MAGIC);
+  private static final List<String> MAGIC_AT_ROOT_WITH_CHILD =
+  list(MAGIC, "child");
+  private static final List<String> MAGIC_WITH_CHILD =
+  list("parent", MAGIC, "child");
+  private static final List<String> MAGIC_AT_WITHOUT_CHILD =
+  list("parent", MAGIC);
+
+  private static final List<String> DEEP_MAGIC =
+  list("parent1", "parent2", MAGIC, "child1", "child2");
+
+  public static final String[] EMPTY = {};
+
+  @Test
+  public void testSplitPathEmpty() throws Throwable {
+intercept(IllegalArgumentException.class,
+() -> splitPathToElements(new Path("")));
+  }
+
+  @Test
+  public void testSplitPathDoubleBackslash() {
+assertPathSplits("//", EMPTY);
+  }
+
+  @Test
+  public void testSplitRootPath() {
+assertPathSplits("/", EMPTY);
+  }
+
+  @Test
+  public void testSplitBasic() {
+assertPathSplits("/a/b/c",
+new String[]{"a", "b", "c"});
+  }
+
+  @Test
+  public void testSplitTrailingSlash() {
+assertPathSplits("/a/b/c/",
+new String[]{"a", "b", "c"});
+  }
+
+  @Test
+  public void testSplitShortPath() {
+assertPathSplits("/a",
+new String[]{"a"});
+  }
+
+  @Test
+  public void testSplitShortPathTrailingSlash() {
+assertPathSplits("/a/",
+new String[]{"a"});
+  }
+
+  @Test
+  public void testParentsMagicRoot() {
+assertParents(EMPTY, MAGIC_AT_ROOT);
+  }
+
+  @Test
+  public void testChildrenMagicRoot() {
+assertChildren(EMPTY, MAGIC_AT_ROOT);
+  }
+
+  @Test
+  public void testParentsMagicRootWithChild() {
+assertParents(EMPTY, MAGIC_AT_ROOT_WITH_CHILD);
+  }
+
+  @Test
+  public void testChildMagicRootWithChild() {
+assertChildren(a("child"), MAGIC_AT_ROOT_WITH_CHILD);
+  }
+
+  @Test
+  public void testChildrenMagicWithoutChild() {
+assertChildren(EMPTY, MAGIC_AT_WITHOUT_CHILD);
+  }
+
+  @Test
+  public void testChildMagicWithChild() {
+assertChildren(a("child"), MAGIC_WITH_CHILD);
+  }
+
+  @Test
+  public void testParentMagicWithChild() {
+assertParents(a("parent"), MAGIC_WITH_CHILD);
+  }
+
+  @Test
+  public void testParentDeepMagic() {
+assertParents(a("parent1", "parent2"), DEEP_MAGIC);
+  }
+
+  @Test
+  public void testChildrenDeepMagic() {
+assertChildren(a("child1", "child2"), DEEP_MAGIC);
+  }
+
+  @Test
+  public void testLastElementEmpty() throws Throwable {
+intercept(IllegalArgumentException.class,
+() -> lastElement(new ArrayList<>(0)));
+  }
+
+  @Test
+  public void testLastElementSingle() {
+assertEquals("first", lastElement(l("first")));
+  }
+
+  @Test
+  public void testLastElementDouble() {
+assertEquals("2", lastElement(l("first", "2")));
+  }
+
+  @Test
+  public void testFinalDestinationNoMagic() {
+assertEquals(l("first", "2"),
+finalDestination(l("first", "2")));
+  }
+
+  @Test
+  public void testFinalDestinationMagic1() {
+assertEquals(l("first", "2"),
+finalDestination(l("first", MAGIC, 

[17/36] hadoop git commit: YARN-5534. Allow user provided Docker volume mount list. (Contributed by Shane Kumpf)

2017-11-28 Thread aengineer
YARN-5534.  Allow user provided Docker volume mount list.  (Contributed by 
Shane Kumpf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d42a336c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d42a336c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d42a336c

Branch: refs/heads/HDFS-7240
Commit: d42a336cfab106d052aa30d80d9d30904123cb55
Parents: de8b6ca
Author: Eric Yang 
Authored: Wed Nov 22 13:05:34 2017 -0500
Committer: Eric Yang 
Committed: Wed Nov 22 13:05:34 2017 -0500

--
 .../runtime/DockerLinuxContainerRuntime.java|  42 +++
 .../linux/runtime/docker/DockerRunCommand.java  |  12 ++
 .../runtime/TestDockerContainerRuntime.java | 109 +++
 .../src/site/markdown/DockerContainers.md   |  48 
 4 files changed, 211 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d42a336c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 75a28e6..e61dc23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -65,6 +65,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
@@ -134,6 +135,16 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * source is an absolute path that is not a symlink and that points to a
  * localized resource.
  *   
+ *   
+ * {@code YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS} allows users to specify
+ * additional volume mounts for the Docker container. The value of the
+ * environment variable should be a comma-separated list of mounts.
+ * All such mounts must be given as {@code source:dest:mode}, and the mode
+ * must be "ro" (read-only) or "rw" (read-write) to specify the type of
+ * access being requested. The requested mounts will be validated by
+ * container-executor based on the values set in container-executor.cfg for
+ * {@code docker.allowed.ro-mounts} and {@code docker.allowed.rw-mounts}.
+ *   
  * 
  */
 @InterfaceAudience.Private
@@ -151,6 +162,8 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$";
   private static final Pattern hostnamePattern = Pattern.compile(
   HOSTNAME_PATTERN);
+  private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
+  "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
 
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_IMAGE =
@@ -176,6 +189,9 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS =
   "YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS";
+  @InterfaceAudience.Private
+  public static final String ENV_DOCKER_CONTAINER_MOUNTS =
+  "YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS";
 
   private Configuration conf;
   private Context nmContext;
@@ -675,6 +691,32 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   }
 }
 
+if (environment.containsKey(ENV_DOCKER_CONTAINER_MOUNTS)) {
+  Matcher parsedMounts = USER_MOUNT_PATTERN.matcher(
+  environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
+  if (!parsedMounts.find()) {
+throw new ContainerExecutionException(
+"Unable to parse user supplied mount list: "
++ environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
+  }
+  parsedMounts.reset();
+  while (parsedMounts.find()) {
+String src = parsedMounts.group(1);
+String dst = 
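
The hunk above is cut off by the archive, but the validation it adds is easy to illustrate. The following self-contained editorial sketch runs the same USER_MOUNT_PATTERN regular expression over a hypothetical value of YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS; the sample paths are invented for illustration and the class is not part of the patch.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Editorial sketch; the pattern is copied from the hunk above. */
public class DockerMountListSketch {
  private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
      "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");

  public static void main(String[] args) {
    // Hypothetical value of YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS.
    String mounts = "/usr/lib/jvm:/usr/lib/jvm:ro,/var/log/app:/var/log/app:rw";
    Matcher parsedMounts = USER_MOUNT_PATTERN.matcher(mounts);
    while (parsedMounts.find()) {
      String src = parsedMounts.group(1);   // host-side source path
      String dst = parsedMounts.group(2);   // in-container destination path
      String mode = parsedMounts.group(3);  // "ro" or "rw", per the javadoc
      System.out.printf("mount %s -> %s (%s)%n", src, dst, mode);
    }
  }
}

The lookbehind (?<=^|,) lets a single pattern walk the comma-separated list without a prior split, which mirrors the find()/reset()/find() loop in the runtime code above.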

[20/36] hadoop git commit: YARN-7524. Remove unused FairSchedulerEventLog. (Contributed by Wilfred Spiegelenburg)

2017-11-28 Thread aengineer
YARN-7524. Remove unused FairSchedulerEventLog. (Contributed by Wilfred 
Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc9479d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc9479d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc9479d

Branch: refs/heads/HDFS-7240
Commit: 4cc9479dae2bfb7d14d29b55d103eea9fa35a586
Parents: 738d1a2
Author: Yufei Gu 
Authored: Wed Nov 22 14:18:36 2017 -0800
Committer: Yufei Gu 
Committed: Wed Nov 22 14:18:36 2017 -0800

--
 .../scheduler/fair/FairScheduler.java   |   8 -
 .../fair/FairSchedulerConfiguration.java|  16 --
 .../scheduler/fair/FairSchedulerEventLog.java   | 152 ---
 .../fair/TestFairSchedulerEventLog.java |  83 --
 4 files changed, 259 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9479d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index b2978d4..661d0a0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -177,7 +177,6 @@ public class FairScheduler extends
   protected double rackLocalityThreshold; // Cluster threshold for rack 
locality
   protected long nodeLocalityDelayMs; // Delay for node locality
   protected long rackLocalityDelayMs; // Delay for rack locality
-  private FairSchedulerEventLog eventLog; // Machine-readable event log
   protected boolean assignMultiple; // Allocate multiple containers per
 // heartbeat
   @VisibleForTesting
@@ -404,10 +403,6 @@ public class FairScheduler extends
 return continuousSchedulingSleepMs;
   }
 
-  public FairSchedulerEventLog getEventLog() {
-return eventLog;
-  }
-
   /**
* Add a new application to the scheduler, with a given id, queue name, and
* user. This will accept a new app even if the user or queue is above
@@ -875,7 +870,6 @@ public class FairScheduler extends
 try {
   writeLock.lock();
   long start = getClock().getTime();
-  eventLog.log("HEARTBEAT", nm.getHostName());
   super.nodeUpdate(nm);
 
   FSSchedulerNode fsNode = getFSSchedulerNode(nm.getNodeID());
@@ -1284,8 +1278,6 @@ public class FairScheduler extends
 
   // This stores per-application scheduling information
   this.applications = new ConcurrentHashMap<>();
-  this.eventLog = new FairSchedulerEventLog();
-  eventLog.init(this.conf);
 
   allocConf = new AllocationConfiguration(conf);
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9479d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 9c9eee6..38e71a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -17,7 +17,6 @@
 */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.io.File;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -64,12 +63,6 @@ public class FairSchedulerConfiguration extends 

[30/36] hadoop git commit: HDFS-12858. Add router admin commands usage in HDFS commands reference doc. Contributed by Yiqun Lin.

2017-11-28 Thread aengineer
HDFS-12858. Add router admin commands usage in HDFS commands reference doc. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94bed504
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94bed504
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94bed504

Branch: refs/heads/HDFS-7240
Commit: 94bed5047113fb148194380853ff01e92897a91f
Parents: d8923cd
Author: Yiqun Lin 
Authored: Tue Nov 28 11:48:55 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Nov 28 11:48:55 2017 +0800

--
 .../src/site/markdown/HDFSCommands.md   | 23 
 1 file changed, 23 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94bed504/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index c5f80d0..d8462c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -414,6 +414,29 @@ Usage:
 
 Runs a HDFS dfsadmin client.
 
+### `dfsrouter`
+
+Usage: `hdfs dfsrouter`
+
+Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more 
info.
+
+### `dfsrouteradmin`
+
+Usage:
+
+  hdfs dfsrouteradmin
+  [-add <source> <nameservice> <destination>]
+  [-rm <source>]
+  [-ls <path>]
+
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `-add` *source* *nameservice* *destination* | Add a mount table entry or 
update if it exists. |
+| `-rm` *source* | Remove mount point of specified path. |
+| `-ls` *path* | List mount points under specified path. |
+
+The commands for managing Router-based federation. See [Mount table 
management](./HDFSRouterFederation.html#Mount_table_management) for more info.
+
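
As an editorial aside (not part of the patch): a typical mount-table session with the commands documented above might look like the following, where `ns1` and the `/data` paths are invented placeholders.

  hdfs dfsrouteradmin -add /data ns1 /data
  hdfs dfsrouteradmin -ls /
  hdfs dfsrouteradmin -rm /data
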
 ### `diskbalancer`
 
 Usage:





[34/36] hadoop git commit: YARN-6647. RM can crash during transitionToStandby due to InterruptedException. Contributed by Bibin A Chundatt

2017-11-28 Thread aengineer
YARN-6647. RM can crash during transitionToStandby due to InterruptedException. 
Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2c7a73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2c7a73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2c7a73e

Branch: refs/heads/HDFS-7240
Commit: a2c7a73e33045ce42cce19aacbe45c0421a61994
Parents: 6b76695
Author: Jason Lowe 
Authored: Tue Nov 28 11:10:18 2017 -0600
Committer: Jason Lowe 
Committed: Tue Nov 28 11:15:44 2017 -0600

--
 .../RMDelegationTokenSecretManager.java | 42 ++--
 1 file changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c7a73e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
index 53cc471..37cd741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
@@ -82,14 +82,21 @@ public class RMDelegationTokenSecretManager extends
 return new RMDelegationTokenIdentifier();
   }
 
+  private boolean shouldIgnoreException(Exception e) {
+return !running && e.getCause() instanceof InterruptedException;
+  }
+
   @Override
   protected void storeNewMasterKey(DelegationKey newKey) {
 try {
   LOG.info("storing master key with keyID " + newKey.getKeyId());
   rm.getRMContext().getStateStore().storeRMDTMasterKey(newKey);
 } catch (Exception e) {
-  LOG.error("Error in storing master key with KeyID: " + 
newKey.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error(
+"Error in storing master key with KeyID: " + newKey.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -99,8 +106,10 @@ public class RMDelegationTokenSecretManager extends
   LOG.info("removing master key with keyID " + key.getKeyId());
   rm.getRMContext().getStateStore().removeRMDTMasterKey(key);
 } catch (Exception e) {
-  LOG.error("Error in removing master key with KeyID: " + key.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in removing master key with KeyID: " + 
key.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -113,9 +122,11 @@ public class RMDelegationTokenSecretManager extends
   rm.getRMContext().getStateStore().storeRMDelegationToken(identifier,
   renewDate);
 } catch (Exception e) {
-  LOG.error("Error in storing RMDelegationToken with sequence number: "
-  + identifier.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in storing RMDelegationToken with sequence number: "
++ identifier.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -127,9 +138,11 @@ public class RMDelegationTokenSecretManager extends
   + id.getSequenceNumber());
   rm.getRMContext().getStateStore().updateRMDelegationToken(id, renewDate);
 } catch (Exception e) {
-  LOG.error("Error in updating persisted RMDelegationToken" +
-" with sequence number: " + id.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in updating persisted RMDelegationToken"
++ " with sequence number: " + id.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -141,9 +154,12 @@ public class RMDelegationTokenSecretManager extends
   + ident.getSequenceNumber());
   rm.getRMContext().getStateStore().removeRMDelegationToken(ident);
 } catch (Exception e) {
-  LOG.error("Error in removing RMDelegationToken with sequence number: "
-  + ident.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if 
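
The final hunk is cut off by the archive, but it repeats the guard shown in the four hunks above. That guard is worth spelling out: during a transition to standby the state store is stopped, pending store calls then fail with an InterruptedException as the cause, and that specific failure must no longer terminate the JVM. A minimal standalone sketch of the idiom follows; the class, field, and method names are invented for illustration.

/** Editorial sketch of the ignore-interrupt-on-shutdown guard. */
public class StoreGuardSketch {
  private volatile boolean running = true;

  private boolean shouldIgnoreException(Exception e) {
    // Same logic as the patch: ignore only when we are stopping AND
    // the root cause of the failure is an interrupt.
    return !running && e.getCause() instanceof InterruptedException;
  }

  void store(Runnable storeOp) {
    try {
      storeOp.run();
    } catch (Exception e) {
      if (!shouldIgnoreException(e)) {
        // The real code logs and calls ExitUtil.terminate(1, e) here.
        throw new IllegalStateException("fatal store failure", e);
      }
      // Otherwise: shutting down, swallow the interrupt-driven failure.
    }
  }

  void stop() {
    running = false;
  }
}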

[25/36] hadoop git commit: YARN-7509. AsyncScheduleThread and ResourceCommitterService are still running after RM is transitioned to standby. (Tao Yang via wangda)

2017-11-28 Thread aengineer
YARN-7509. AsyncScheduleThread and ResourceCommitterService are still running 
after RM is transitioned to standby. (Tao Yang via wangda)

Change-Id: I7477fe355419fd4a0a6e2bdda7319abad4c4c748


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/834e91ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/834e91ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/834e91ee

Branch: refs/heads/HDFS-7240
Commit: 834e91ee91d22d74866afbf6252107e969bf8370
Parents: d162252
Author: Wangda Tan 
Authored: Thu Nov 23 19:59:03 2017 -0800
Committer: Wangda Tan 
Committed: Thu Nov 23 19:59:03 2017 -0800

--
 .../scheduler/capacity/CapacityScheduler.java   |  16 +-
 .../TestRMHAForAsyncScheduler.java  | 155 +++
 2 files changed, 164 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e91ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index ed30ad1..218adf3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -459,7 +459,7 @@ public class CapacityScheduler extends
* Schedule on all nodes by starting at a random point.
* @param cs
*/
-  static void schedule(CapacityScheduler cs) {
+  static void schedule(CapacityScheduler cs) throws InterruptedException{
 // First randomize the start point
 int current = 0;
 Collection nodes = cs.nodeTracker.getAllNodes();
@@ -475,9 +475,7 @@ public class CapacityScheduler extends
   cs.allocateContainersToNode(node.getNodeID(), false);
 }
 
-try {
-  Thread.sleep(cs.getAsyncScheduleInterval());
-} catch (InterruptedException e) {}
+Thread.sleep(cs.getAsyncScheduleInterval());
   }
 
   static class AsyncScheduleThread extends Thread {
@@ -492,9 +490,9 @@ public class CapacityScheduler extends
 
 @Override
 public void run() {
-  while (true) {
+  while (!Thread.currentThread().isInterrupted()) {
 try {
-  if (!runSchedules.get() || Thread.currentThread().isInterrupted()) {
+  if (!runSchedules.get()) {
 Thread.sleep(100);
   } else {
 // Don't run schedule if we have some pending backlogs already
@@ -505,9 +503,11 @@ public class CapacityScheduler extends
 }
   }
 } catch (InterruptedException ie) {
-  // Do nothing
+  // keep interrupt signal
+  Thread.currentThread().interrupt();
 }
   }
+  LOG.info("AsyncScheduleThread[" + getName() + "] exited!");
 }
 
 public void beginSchedule() {
@@ -546,8 +546,10 @@ public class CapacityScheduler extends
 
 } catch (InterruptedException e) {
   LOG.error(e);
+  Thread.currentThread().interrupt();
 }
   }
+  LOG.info("ResourceCommitterService exited!");
 }
 
 public void addNewCommitRequest(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e91ee/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
new file mode 100644
index 000..46d5cda
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software 
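
The new test file is cut off mid-header by the archive. The substance of the fix is the standard interrupt-status idiom: Thread.sleep() clears the interrupt flag when it throws, so the catch block must re-assert the flag for the loop condition to observe it. A minimal editorial sketch of that idiom, not the scheduler code itself:

/** Editorial sketch of the interrupt-preserving worker loop. */
public class InterruptibleWorker extends Thread {
  @Override
  public void run() {
    // Exit cleanly once someone calls interrupt(), as the patched
    // AsyncScheduleThread now does.
    while (!Thread.currentThread().isInterrupted()) {
      try {
        doOneRoundOfWork();
        Thread.sleep(100);        // throws if interrupted while sleeping
      } catch (InterruptedException ie) {
        // sleep() cleared the flag; restore it so the loop condition sees it.
        Thread.currentThread().interrupt();
      }
    }
    System.out.println(getName() + " exited!");
  }

  private void doOneRoundOfWork() {
    // Placeholder for schedule(cs) in the real code.
  }
}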

[12/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
new file mode 100644
index 000..b3bcca1
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/AbstractS3ACommitterFactory.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit;
+
+import java.io.IOException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory;
+
+/**
+ * Dynamically create the output committer based on subclass type and settings.
+ */
+public abstract class AbstractS3ACommitterFactory
+extends PathOutputCommitterFactory {
+  public static final Logger LOG = LoggerFactory.getLogger(
+  AbstractS3ACommitterFactory.class);
+
+  @Override
+  public PathOutputCommitter createOutputCommitter(Path outputPath,
+  TaskAttemptContext context) throws IOException {
+FileSystem fs = getDestinationFileSystem(outputPath, context);
+PathOutputCommitter outputCommitter;
+if (fs instanceof S3AFileSystem) {
+  outputCommitter = createTaskCommitter((S3AFileSystem)fs,
+  outputPath, context);
+} else {
+  throw new PathCommitException(outputPath,
+  "Filesystem not supported by this committer");
+}
+LOG.info("Using Commmitter {} for {}",
+outputCommitter,
+outputPath);
+return outputCommitter;
+  }
+
+  /**
+   * Get the destination filesystem, returning null if there is none.
+   * Code using this must explicitly or implicitly look for a null value
+   * in the response.
+   * @param outputPath output path
+   * @param context job/task context
+   * @return the destination filesystem, if it can be determined
+   * @throws IOException if the FS cannot be instantiated
+   */
+  protected FileSystem getDestinationFileSystem(Path outputPath,
+  JobContext context)
+  throws IOException {
+return outputPath != null ?
+  FileSystem.get(outputPath.toUri(), context.getConfiguration())
+  : null;
+  }
+
+  /**
+   * Implementation point: create a task committer for a specific filesystem.
+   * @param fileSystem destination FS.
+   * @param outputPath final output path for work
+   * @param context task context
+   * @return a committer
+   * @throws IOException any problem, including the FS not supporting
+   * the desired committer
+   */
+  public abstract PathOutputCommitter createTaskCommitter(
+  S3AFileSystem fileSystem,
+  Path outputPath,
+  TaskAttemptContext context) throws IOException;
+}
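
A concrete factory only has to supply createTaskCommitter; the base class above handles filesystem resolution and the S3AFileSystem type check. A hypothetical subclass might look like the following editorial sketch, where MyCommitter is an invented PathOutputCommitter subclass used purely for illustration and is not part of the patch.

package org.apache.hadoop.fs.s3a.commit; // assumed, for illustration only

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;

/** Hypothetical factory; MyCommitter is an assumed PathOutputCommitter. */
public class MyCommitterFactory extends AbstractS3ACommitterFactory {
  @Override
  public PathOutputCommitter createTaskCommitter(
      S3AFileSystem fileSystem,
      Path outputPath,
      TaskAttemptContext context) throws IOException {
    // The base class has already verified the destination is an
    // S3AFileSystem before this method is called.
    return new MyCommitter(outputPath, context);
  }
}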

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
new file mode 100644
index 000..03cfcba
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitConstants.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use 

[09/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
new file mode 100644
index 000..b974ea8
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
@@ -0,0 +1,1951 @@
+
+
+# S3A Committers: Architecture and Implementation
+
+
+
+This document covers the architecture and implementation details of the S3A 
committers.
+
+For information on using the committers, see [the S3A 
Committers](./committer.html).
+
+
+## Problem: Efficient, reliable commits of work to consistent S3 buckets
+
+
+The standard commit algorithms (the `FileOutputCommitter` and its v1 and v2 
algorithms)
+rely on directory rename being an `O(1)` atomic operation: callers output their
+work to temporary directories in the destination filesystem, then
+rename these directories to the final destination as a way of committing work.
+This is the perfect solution for committing work against any filesystem with
+consistent listing operations and where the `FileSystem.rename()` command
+is an atomic `O(1)` operation.
+
+Using rename allows individual tasks to work in temporary directories, with the
+rename serving as the atomic operation that explicitly commits tasks and
+ultimately the entire job. Because the cost of the rename is low, it can be
+performed during task and job commits with minimal delays. Note that HDFS
+will lock the namenode metadata during the rename operation, so all rename() 
calls
+will be serialized. However, as they only update the metadata of two directory
+entries, the duration of the lock is low.
+
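
As an editorial illustration of the rename-based commit described above (not part of the committed document), here is a minimal sketch against the Hadoop FileSystem API; the paths are invented placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Editorial sketch: promote a task attempt's output with one rename. */
public class RenameCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical task-attempt and final paths on a consistent filesystem.
    Path attemptDir = new Path("hdfs:///out/_temporary/attempt_0001");
    Path finalDir = new Path("hdfs:///out/part-0001");
    FileSystem fs = attemptDir.getFileSystem(conf);
    // On HDFS this rename is an atomic metadata operation, which is
    // exactly the property the FileOutputCommitter algorithms rely on.
    if (!fs.rename(attemptDir, finalDir)) {
      throw new IllegalStateException("commit failed: rename returned false");
    }
  }
}
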
+In contrast to a "real" filesystem, Amazon's S3A object store, similar to
+most others, does not support `rename()` at all. A hash operation on the 
filename
+determines the location of of the data —there is no separate metadata to 
change.
+To mimic renaming, the Hadoop S3A client has to copy the data to a new object
+with the destination filename, then delete the original entry. This copy
+can be executed server-side, but as it does not complete until the in-cluster
+copy has completed, it takes time proportional to the amount of data.
+
+The rename overhead is the most visible issue, but it is not the most 
dangerous.
+That is the fact that path listings have no consistency guarantees, and may
+lag the addition or deletion of files.
+If files are not listed, the commit operation will *not* copy them, and
+so they will not appear in the final output.
+
+The solution to this problem is closely coupled to the S3 protocol itself:
+delayed completion of multi-part PUT operations.
+
+That is: tasks write all data as multipart uploads, *but delay the final
+commit action until the final, single job commit action.* Only that
+data committed in the job commit action will be made visible; work from
+speculative and failed tasks will not be instantiated. As there is no rename,
+there is no
+delay while data is copied from a temporary directory to the final directory.
+The duration of the commit will be the time needed to determine which commit 
operations
+to construct, and to execute them.
+
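
To make the delayed-completion mechanism concrete, here is a minimal editorial sketch against the AWS SDK for Java v1 (the same com.amazonaws.services.s3.model classes imported by the test code elsewhere in this series); the bucket, key, and payload are placeholders, and this illustrates the S3 mechanism rather than quoting the patch. The key property: nothing becomes visible at the destination key until completeMultipartUpload runs, and the committers defer exactly that call to job commit.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.Collections;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

/** Editorial sketch of a delayed-completion multipart PUT. */
public class DelayedCommitSketch {
  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    String bucket = "example-bucket";          // placeholder
    String key = "output/part-0000";           // placeholder
    byte[] data = "task output".getBytes(StandardCharsets.UTF_8);

    // Task side: start the upload and push the data as parts.
    String uploadId = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
    PartETag etag = s3.uploadPart(new UploadPartRequest()
        .withBucketName(bucket).withKey(key)
        .withUploadId(uploadId).withPartNumber(1)
        .withInputStream(new ByteArrayInputStream(data))
        .withPartSize(data.length)).getPartETag();

    // ...arbitrarily later, in the job commit: only now does the object
    // become visible at its final key. No copy, no rename.
    s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
        bucket, key, uploadId, Collections.singletonList(etag)));
  }
}
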
+
+## Terminology
+
+* *Job*: a potentially parallelized query/operation to execute. The execution
+of a job (the division of work into tasks and the management of their
+completion) is generally managed in a single process.
+
+The output of a Job is made visible to other stages in a larger operation
+sequence or other applications if the job *completes successfully*.
+
+* *Job Driver*. Not sure quite what term to use here. Whatever process 
schedules
+task execution, tracks success/failures and determines when all the work has
+been processed and then commits the output. It may also determine that a job
+has failed and cannot be recovered, in which case the job is aborted.
+In MR and Tez, this is inside the YARN application master.
+In Spark it is the driver, which can run in the AM, the YARN client, or other
+places (e.g. Livy?).
+
+* *Final directory*: the directory into which the output of a job is placed
+so as to be visible.
+
+* *Task*: a single operation within a job, on a single process, one which
+generates one or more files.
+After a successful job completion, the data MUST be visible in the final 
directory.
+A task completes successfully if it generates all the output it expects to 
without
+failing in some way (error in processing; network/process failure).
+
+* *Job Context*: an instance of the class `org.apache.hadoop.mapreduce.JobContext`,
+which provides a read-only view of the Job for the Job Driver and tasks.
+
+* *Task Attempt Context*: an instance of the class
+`org.apache.hadoop.mapreduce.TaskAttemptContext 

[15/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
new file mode 100644
index 000..13e1c61
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestPathOutputCommitterFactory.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.lib.output;
+
+import java.io.IOException;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+
+import static 
org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory.*;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+/**
+ * Test the committer factory logic, looking at the override
+ * and fallback behavior.
+ */
+@SuppressWarnings("unchecked")
+public class TestPathOutputCommitterFactory extends Assert {
+
+  private static final String HTTP_COMMITTER_FACTORY = String.format(
+  COMMITTER_FACTORY_SCHEME_PATTERN, "http");
+
+  private static final Path HTTP_PATH = new Path("http://hadoop.apache.org/");
+  private static final Path HDFS_PATH = new Path("hdfs://localhost:8081/");
+
+  private TaskAttemptID taskAttemptID =
+  new TaskAttemptID("local", 0, TaskType.MAP, 1, 2);
+
+  /**
+   * Set a factory for a schema, verify it works.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testCommitterFactoryForSchema() throws Throwable {
+createCommitterFactory(SimpleCommitterFactory.class,
+HTTP_PATH,
+newBondedConfiguration());
+  }
+
+  /**
+   * A schema factory only affects that filesystem.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testCommitterFactoryFallbackDefault() throws Throwable {
+createCommitterFactory(FileOutputCommitterFactory.class,
+HDFS_PATH,
+newBondedConfiguration());
+  }
+
+  /**
+   * A schema factory only affects that filesystem; test through
+   * {@link PathOutputCommitterFactory#createCommitter(Path, 
TaskAttemptContext)}.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testCommitterFallbackDefault() throws Throwable {
+createCommitter(FileOutputCommitter.class,
+HDFS_PATH,
+taskAttempt(newBondedConfiguration()));
+  }
+
+  /**
+   * Verify that you can override any schema with an explicit name.
+   */
+  @Test
+  public void testCommitterFactoryOverride() throws Throwable {
+Configuration conf = newBondedConfiguration();
+// set up for the schema factory
+// and then set a global one which overrides the others.
+conf.set(COMMITTER_FACTORY_CLASS, OtherFactory.class.getName());
+createCommitterFactory(OtherFactory.class, HDFS_PATH, conf);
+createCommitterFactory(OtherFactory.class, HTTP_PATH, conf);
+  }
+
+  /**
+   * Verify that if the factory class option is "", schema factory
+   * resolution still works.
+   */
+  @Test
+  public void testCommitterFactoryEmptyOption() throws Throwable {
+Configuration conf = newBondedConfiguration();
+// set up for the schema factory
+// and then set a global one which overrides the others.
+conf.set(COMMITTER_FACTORY_CLASS, "");
+createCommitterFactory(SimpleCommitterFactory.class, HTTP_PATH, conf);
+
+// and HDFS, with no schema, falls back to the default
+

[19/36] hadoop git commit: HDFS-12754. Lease renewal can hit a deadlock. Contributed by Kuhu Shukla.

2017-11-28 Thread aengineer
HDFS-12754. Lease renewal can hit a deadlock. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/738d1a20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/738d1a20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/738d1a20

Branch: refs/heads/HDFS-7240
Commit: 738d1a206aba05f0b4be7d633b17db7fcd1c74bc
Parents: 785732c
Author: Kihwal Lee 
Authored: Wed Nov 22 13:01:07 2017 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 22 13:01:55 2017 -0600

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 15 +++-
 .../hadoop/hdfs/DFSClientFaultInjector.java |  2 +
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   | 55 +-
 .../hdfs/client/impl/TestLeaseRenewer.java  | 23 ++
 .../hadoop/hdfs/TestDFSClientRetries.java   | 80 
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  5 +-
 6 files changed, 122 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/738d1a20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 467d6be..5652321 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -483,12 +483,21 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   /** Get a lease and start automatic renewal */
   private void beginFileLease(final long inodeId, final DFSOutputStream out)
   throws IOException {
-getLeaseRenewer().put(inodeId, out, this);
+synchronized (filesBeingWritten) {
+  putFileBeingWritten(inodeId, out);
+  getLeaseRenewer().put(this);
+}
   }
 
   /** Stop renewal of lease for the file. */
   void endFileLease(final long inodeId) {
-getLeaseRenewer().closeFile(inodeId, this);
+synchronized (filesBeingWritten) {
+  removeFileBeingWritten(inodeId);
+  // remove client from renewer if no files are open
+  if (filesBeingWritten.isEmpty()) {
+getLeaseRenewer().closeClient(this);
+  }
+}
   }
 
 
@@ -614,9 +623,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   @Override
   public synchronized void close() throws IOException {
 if(clientRunning) {
+  // lease renewal stops when all files are closed
   closeAllFilesBeingWritten(false);
   clientRunning = false;
-  getLeaseRenewer().closeClient(this);
   // close connections to the namenode
   closeConnectionToNamenode();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/738d1a20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index b58cf16..d36c058 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -63,4 +63,6 @@ public class DFSClientFaultInjector {
   }
 
   public void sleepBeforeHedgedGet() {}
+
+  public void delayWhenRenewLeaseTimeout() {}
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/738d1a20/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
index 6faf133..e33d024 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
@@ -30,7 +30,7 @@ import java.util.Map;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSClientFaultInjector;
 import 
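
The diff is truncated here by the archive. The essence of the fix, visible in the DFSClient hunk above, is that the open-files map and the renewer registration are now updated under the single filesBeingWritten monitor, so the client and the renewer thread can no longer acquire their locks in opposite orders. A generic editorial sketch of that idiom, with invented names:

import java.util.HashMap;
import java.util.Map;

/** Editorial sketch of single-monitor lease bookkeeping. */
public class LeaseBookkeepingSketch {
  private final Map<Long, Object> filesBeingWritten = new HashMap<>();

  // Both state changes happen under the same monitor, in the same order,
  // mirroring beginFileLease()/endFileLease() in the patch. This prevents
  // two threads from taking two locks in opposite orders, which is the
  // deadlock fixed here.
  void beginFileLease(long inodeId, Object out) {
    synchronized (filesBeingWritten) {
      filesBeingWritten.put(inodeId, out);
      registerWithRenewer();
    }
  }

  void endFileLease(long inodeId) {
    synchronized (filesBeingWritten) {
      filesBeingWritten.remove(inodeId);
      if (filesBeingWritten.isEmpty()) {
        unregisterFromRenewer(); // renewal stops when all files are closed
      }
    }
  }

  private void registerWithRenewer() { /* stand-in for LeaseRenewer.put() */ }
  private void unregisterFromRenewer() { /* stand-in for closeClient() */ }
}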

[32/36] hadoop git commit: YARN-7499. Layout changes to Application details page in new YARN UI. Contributed by Vasudevan Skm.

2017-11-28 Thread aengineer
YARN-7499. Layout changes to Application details page in new YARN UI. 
Contributed by Vasudevan Skm.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/641ba5c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/641ba5c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/641ba5c7

Branch: refs/heads/HDFS-7240
Commit: 641ba5c7a1471f8d799b1f919cd41daffb9da84e
Parents: 0ea182d
Author: Sunil G 
Authored: Tue Nov 28 18:37:11 2017 +0530
Committer: Sunil G 
Committed: Tue Nov 28 18:37:11 2017 +0530

--
 .../webapp/app/controllers/app-table-columns.js |   4 +-
 .../src/main/webapp/app/controllers/yarn-app.js |  69 -
 .../webapp/app/controllers/yarn-flowrun/info.js |   2 +-
 .../src/main/webapp/app/models/yarn-app.js  |   6 +-
 .../src/main/webapp/app/router.js   |  12 +-
 .../src/main/webapp/app/routes/yarn-app.js  |  23 +-
 .../main/webapp/app/routes/yarn-app/attempts.js |  15 +-
 .../main/webapp/app/routes/yarn-app/charts.js   |  18 +-
 .../webapp/app/routes/yarn-app/components.js|  16 +-
 .../main/webapp/app/routes/yarn-app/configs.js  |  16 +-
 .../src/main/webapp/app/routes/yarn-app/info.js |  17 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   2 +-
 .../src/main/webapp/app/styles/app.scss |  24 ++
 .../src/main/webapp/app/styles/colors.scss  |   2 +
 .../src/main/webapp/app/styles/layout.scss  |  42 +++
 .../src/main/webapp/app/styles/variables.scss   |   4 +
 .../src/main/webapp/app/styles/yarn-app.scss|  35 +++
 .../app/templates/components/timeline-view.hbs  |   2 +-
 .../src/main/webapp/app/templates/yarn-app.hbs  | 149 +++---
 .../webapp/app/templates/yarn-app/attempts.hbs  |   2 +-
 .../webapp/app/templates/yarn-app/charts.hbs|  46 ++-
 .../app/templates/yarn-app/components.hbs   |   6 +-
 .../webapp/app/templates/yarn-app/configs.hbs   |  58 ++--
 .../main/webapp/app/templates/yarn-app/info.hbs | 281 +--
 .../webapp/app/templates/yarn-app/loading.hbs   |   2 +-
 .../main/webapp/app/templates/yarn-services.hbs |   2 +-
 26 files changed, 518 insertions(+), 337 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/641ba5c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index 05bfad45..a87acc1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -39,7 +39,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.id,
-  href: `#/yarn-app/${row.id}/info`
+  href: `#/yarn-app/${row.id}/attempts`
 };
   }
   }, {
@@ -120,7 +120,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('appName'),
-  href: `#/yarn-app/${row.id}/info?service=${row.get('appName')}`
+  href: `#/yarn-app/${row.id}/attempts?service=${row.get('appName')}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/641ba5c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index c40697f..b84f328 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -32,6 +32,65 @@ export default Ember.Controller.extend({
 text: 'App'
   }],
 
+  actions: {
+showStopServiceConfirm() {
+  this.set('actionResponse', null);
+  Ember.$("#stopServiceConfirmDialog").modal('show');
+},
+
+stopService() {
+  var self = this;
+  Ember.$("#stopServiceConfirmDialog").modal('hide');
+  var adapter = this.store.adapterFor('yarn-servicedef');
+  self.set('isLoading', true);
+  adapter.stopService(this.model.serviceName).then(function () {
+self.set('actionResponse', { msg: 'Service stopped successfully. Auto 
refreshing in 5 seconds.', type: 'success' });
+ 

[04/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
new file mode 100644
index 000..2c348f5
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
@@ -0,0 +1,696 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit.staging;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
+import com.google.common.collect.Sets;
+import org.hamcrest.core.StringStartsWith;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.AWSClientIOException;
+import org.apache.hadoop.fs.s3a.MockS3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.commit.files.PendingSet;
+import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.task.JobContextImpl;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+import static org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants.*;
+import static 
org.apache.hadoop.fs.s3a.commit.staging.StagingCommitterConstants.*;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.commit.staging.Paths.*;
+import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.*;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * The main unit test suite of the staging committer.
+ * Parameterized on thread count and unique filename policy.
+ */
+@RunWith(Parameterized.class)
+public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
+
+  private static final JobID JOB_ID = new JobID("job", 1);
+  private static final TaskAttemptID AID = new TaskAttemptID(
+  new TaskID(JOB_ID, TaskType.REDUCE, 2), 3);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStagingCommitter.class);
+
+  private final int numThreads;
+  private final boolean uniqueFilenames;
+  private JobContext job = null;
+  private TaskAttemptContext tac = null;
+  private Configuration conf = null;
+  private MockedStagingCommitter jobCommitter = null;
+  private MockedStagingCommitter committer = null;
+
+  // created in Before
+  private S3AFileSystem mockFS = null;
+  private MockS3AFileSystem wrapperFS = null;
+
+  // created in Before
+  private StagingTestBase.ClientResults results = null;
+  private StagingTestBase.ClientErrors errors = null;
+  private AmazonS3 

[35/36] hadoop git commit: HDFS-12857. StoragePolicyAdmin should support schema based path. Contributed by Surendra Singh Lilhore.

2017-11-28 Thread aengineer
HDFS-12857. StoragePolicyAdmin should support schema based path. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30941d99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30941d99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30941d99

Branch: refs/heads/HDFS-7240
Commit: 30941d99c9014431981eeb09ab24e90bef629fee
Parents: a2c7a73
Author: Surendra Singh Lilhore 
Authored: Tue Nov 28 23:57:03 2017 +0530
Committer: Surendra Singh Lilhore 
Committed: Tue Nov 28 23:57:03 2017 +0530

--
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  6 ++--
 .../tools/TestViewFSStoragePolicyCommands.java  | 37 ++--
 2 files changed, 38 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30941d99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index d5e5b4d..aeb10d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -153,7 +153,7 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
   }
 
   Path p = new Path(path);
-  final FileSystem fs = FileSystem.get(conf);
+  final FileSystem fs = FileSystem.get(p.toUri(), conf);
   try {
 FileStatus status;
 try {
@@ -233,7 +233,7 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
 return 1;
   }
   Path p = new Path(path);
-  final FileSystem fs = FileSystem.get(conf);
+  final FileSystem fs = FileSystem.get(p.toUri(), conf);
   try {
 fs.setStoragePolicy(p, policyName);
 System.out.println("Set storage policy " + policyName + " on " + path);
@@ -279,7 +279,7 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
   }
 
   Path p = new Path(path);
-  final FileSystem fs = FileSystem.get(conf);
+  final FileSystem fs = FileSystem.get(p.toUri(), conf);
   try {
 fs.unsetStoragePolicy(p);
 System.out.println("Unset storage policy from " + path);
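
The three hunks above swap FileSystem.get(conf) for FileSystem.get(p.toUri(), conf),
so the admin tool resolves the filesystem from the path's own scheme and authority
instead of always using the default filesystem. A minimal sketch of the difference
(the namenode address is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SchemePathSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // A path carrying its own scheme, as the tool now accepts.
        Path p = new Path("hdfs://nn1.example.com:8020/user1/bar");
        // Old behaviour: always the default filesystem (fs.defaultFS),
        // e.g. viewfs:// on a federated cluster, so hdfs:// paths failed.
        FileSystem defaultFs = FileSystem.get(conf);
        // New behaviour: the filesystem matching the path's URI.
        FileSystem pathFs = FileSystem.get(p.toUri(), conf);
        System.out.println(defaultFs.getUri() + " vs " + pathFs.getUri());
      }
    }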

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30941d99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
index b3bb3c4..3a94959 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
@@ -21,18 +21,19 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FsConstants;
-
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 
 /**
  * Test StoragePolicyAdmin commands with ViewFileSystem.
@@ -77,4 +78,36 @@ public class TestViewFSStoragePolicyCommands extends 
TestStoragePolicyCommands {
 DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /", 2,
 "is not supported for filesystem viewfs on path /");
   }
+
+  @Test
+  public void testStoragePolicyCommandPathWithSchema() throws Exception {
+Path base1 = new Path("/user1");
+final Path bar = new Path(base1, "bar");
+DFSTestUtil.createFile(cluster.getFileSystem(0), bar, 1024, (short) 1, 0);
+
+// Test with hdfs:// schema
+String pathHdfsSchema = "hdfs://"
++ cluster.getNameNode(0).getClientNamenodeAddress() + "/"
++ bar.toString();
+

[33/36] hadoop git commit: YARN-7480. Render tooltips on columns where text is clipped in new YARN UI. Contributed by Vasudevan Skm. This closes #293

2017-11-28 Thread aengineer
YARN-7480. Render tooltips on columns where text is clipped in new YARN UI. 
Contributed by Vasudevan Skm. This closes #293


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b76695f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b76695f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b76695f

Branch: refs/heads/HDFS-7240
Commit: 6b76695f886d4db7287a0425d56d5e13daf5d08d
Parents: 641ba5c
Author: Sunil G 
Authored: Tue Nov 28 22:41:52 2017 +0530
Committer: Sunil G 
Committed: Tue Nov 28 22:41:52 2017 +0530

--
 .../app/components/em-table-tooltip-text.js | 33 +++
 .../webapp/app/controllers/app-table-columns.js |  4 ++
 .../components/em-table-tooltip-text.hbs| 26 
 .../components/em-table-tooltip-text-test.js| 43 
 4 files changed, 106 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
new file mode 100644
index 000..f363460
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ import Ember from 'ember';
+
+export default Ember.Component.extend({
+  content: null,
+
+  classNames: ["em-table-text-with-tooltip"],
+
+  didRender: function() {
+this.$().parent().css("position", "static");
+  },
+
+  tooltipText: Ember.computed("content", function () {
+return this.get("content");
+  }),
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index a87acc1..fb002f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -50,6 +50,7 @@ export default Ember.Controller.extend({
   }, {
   id: 'appName',
   headerTitle: 'Application Name',
+  cellComponentName: 'em-table-tooltip-text',
   contentPath: 'appName',
   facetType: null,
   }, {
@@ -66,6 +67,7 @@ export default Ember.Controller.extend({
   }, {
   id: 'queue',
   headerTitle: 'Queue',
+  cellComponentName: 'em-table-tooltip-text',
   contentPath: 'queue',
   }, {
   id: 'progress',
@@ -128,6 +130,7 @@ export default Ember.Controller.extend({
   headerTitle: 'Application ID',
   contentPath: 'id',
   facetType: null,
+  cellComponentName: 'em-table-tooltip-text',
   minWidth: "250px"
 }, {
   id: 'state',
@@ -160,6 +163,7 @@ export default Ember.Controller.extend({
 id: 'queue',
 headerTitle: 'Queue',
 contentPath: 'queue',
+cellComponentName: 'em-table-tooltip-text',
 }, {
   id: 'stTime',
   headerTitle: 'Started Time',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/em-table-tooltip-text.hbs
--
diff --git 

[08/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md
new file mode 100644
index 000..c6dbf55
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committers.md
@@ -0,0 +1,819 @@
+
+
+# Committing work to S3 with the "S3A Committers"
+
+
+
+This page covers the S3A Committers, which can commit work directly
+to an S3 object store.
+
+These committers are designed to solve a fundamental problem which
+the standard committers cannot solve for S3: consistent, high-performance,
+and reliable commitment of output to S3.
+
+For details on their internal design, see
+[S3A Committers: Architecture and 
Implementation](./committer_architecture.html).
+
+
+## Introduction: The Commit Problem
+
+
+Apache Hadoop MapReduce (and behind the scenes, Apache Spark) often write
+the output of their work to filesystems.
+
+Normally, Hadoop uses the `FileOutputCommitter` to manage the
+promotion of files created in a single task attempt to the final output of
+a query. This is done in a way to handle failures of tasks and jobs, and to
+support speculative execution. It does that by listing directories and renaming
+their content into the final destination when tasks and then jobs are 
committed.
+
+This places some key requirements on the underlying filesystem:
+
+1. When you list a directory, you see all the files which have been created in 
it,
+and no files which are not in it (i.e. have been deleted).
+1. When you rename a directory, it is an `O(1)` atomic transaction. No other
+process across the cluster may rename a file or directory to the same path.
+If the rename fails for any reason, either the data is at the original 
location,
or it is at the destination, in which case the rename actually succeeded.
+
+**The S3 object store and the `s3a://` filesystem client cannot meet these 
requirements.**
+
+1. Amazon S3 has inconsistent directory listings unless S3Guard is enabled.
+1. The S3A client mimics `rename()` by copying files and then deleting the originals.
+This can fail partway through, and there is nothing to prevent any other 
process
+in the cluster attempting a rename at the same time.
+
+As a result,
+
+* Files may not be listed, hence not renamed into place.
+* Deleted files may still be discovered, confusing the rename process to the 
point
+of failure.
+* If a rename fails, the data is left in an unknown state.
+* If more than one process attempts to commit work simultaneously, the output
+directory may contain the results of both processes: it is no longer an 
exclusive
+operation.
+* While S3Guard may deliver listing consistency, commit time is still
+proportional to the amount of data created. It still can't handle task failure.
+
+**Using the "classic" `FileOutputCommitter` to commit work to Amazon S3 risks
+loss or corruption of generated data.**
+
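
As a concrete illustration of that risk, a minimal, hypothetical sketch
(paths invented, not the committer's real directory layout) of the
commit-by-rename step the classic committer depends on:

    // On HDFS this rename is one atomic metadata operation; on S3A it
    // expands into a COPY of every object plus DELETEs, so a failure
    // partway through leaves the output in an unknown state.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameCommitSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path attemptDir = new Path("s3a://example-bucket/out/_temporary/attempt_001");
        Path destDir = new Path("s3a://example-bucket/out/part-00000");
        FileSystem fs = attemptDir.getFileSystem(conf);
        if (!fs.rename(attemptDir, destDir)) {
          throw new IOException("commit-by-rename failed; output state unknown");
        }
      }
    }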
+
+To address these problems there is now explicit support in the `hadoop-aws`
+module for committing work to Amazon S3 via the S3A filesystem client,
+*the S3A Committers*.
+
+
+For safe, as well as high-performance, output of work to S3,
+we need to use a committer explicitly written to work with S3, treating it as
+an object store with special features.
+
+
+### Background: Hadoop's "Commit Protocol"
+
+How exactly is work written to its final destination? That is accomplished by
+a "commit protocol" between the workers and the job manager.
+
+This protocol is implemented in Hadoop MapReduce, with a similar but extended
+version in Apache Spark:
+
+1. A "Job" is the entire query, from inputs to outputs.
+1. The "Job Manager" is the process in charge of choreographing the execution
+of the job. It may perform some of the actual computation too.
+1. The job has "workers": processes which operate on the actual data
+and write the results.
+1. Workers execute "Tasks", which are fractions of the job whose
+input has been *partitioned* into units of work which can be executed
+independently.
+1. The Job Manager directs workers to execute "tasks", usually trying to 
schedule
+the work close to the data (if the filesystem provides locality information).
+1. Workers can fail: the Job manager needs to detect this and reschedule their 
active tasks.
+1. Workers can also become separated from the Job Manager, a "network 
partition".
+It is (provably) impossible for the Job Manager to distinguish a 
running-but-unreachable
+worker from a failed one.
+1. The output of a failed task must not be visible; this is to avoid its
+data getting into the final output.
+1. Multiple workers can be instructed to evaluate the same partition of the 
work;
+this "speculation" delivers speedup as it can address the "straggler problem".
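
The committers this patch adds plug into that protocol through a committer
factory. A hedged configuration sketch, assuming the key names below match
the patch's constants (the bucket name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    public class S3ACommitterSetupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Route committer creation for the s3a:// scheme to the S3A factory.
        conf.set("mapreduce.outputcommitter.factory.scheme.s3a",
            "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory");
        // Choose a committer: "directory"/"partitioned" stage output locally,
        // "magic" writes directly to S3.
        conf.set("fs.s3a.committer.name", "directory");
        Job job = Job.getInstance(conf, "zero-rename-commit-demo");
        FileOutputFormat.setOutputPath(job, new Path("s3a://example-bucket/output"));
      }
    }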

[26/36] hadoop git commit: YARN-7290. Method canContainerBePreempted can return true when it shouldn't. (Contributed by Steven Rand)

2017-11-28 Thread aengineer
YARN-7290. Method canContainerBePreempted can return true when it shouldn't. 
(Contributed by Steven Rand)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bde3aed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bde3aed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bde3aed

Branch: refs/heads/HDFS-7240
Commit: 2bde3aedf139368fc71f053d8dd6580b498ff46d
Parents: 834e91e
Author: Yufei Gu 
Authored: Fri Nov 24 23:32:46 2017 -0800
Committer: Yufei Gu 
Committed: Fri Nov 24 23:32:46 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java| 23 +--
 .../scheduler/fair/FSPreemptionThread.java  | 68 ++--
 .../fair/TestFairSchedulerPreemption.java   | 37 ---
 3 files changed, 93 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bde3aed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index e711229..43daace 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -588,7 +588,8 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 }
   }
 
-  boolean canContainerBePreempted(RMContainer container) {
+  boolean canContainerBePreempted(RMContainer container,
+  Resource alreadyConsideringForPreemption) {
 if (!isPreemptable()) {
   return false;
 }
@@ -610,6 +611,15 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
 // Check if the app's allocation will be over its fairshare even
 // after preempting this container
+Resource usageAfterPreemption = getUsageAfterPreemptingContainer(
+container.getAllocatedResource(),
+alreadyConsideringForPreemption);
+
+return !isUsageBelowShare(usageAfterPreemption, getFairShare());
+  }
+
+  private Resource getUsageAfterPreemptingContainer(Resource 
containerResources,
+  Resource alreadyConsideringForPreemption) {
 Resource usageAfterPreemption = Resources.clone(getResourceUsage());
 
 // Subtract resources of containers already queued for preemption
@@ -617,10 +627,13 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   Resources.subtractFrom(usageAfterPreemption, resourcesToBePreempted);
 }
 
-// Subtract this container's allocation to compute usage after preemption
-Resources.subtractFrom(
-usageAfterPreemption, container.getAllocatedResource());
-return !isUsageBelowShare(usageAfterPreemption, getFairShare());
+// Subtract resources of this container and other containers of this app
+// that the FSPreemptionThread is already considering for preemption.
+Resources.subtractFrom(usageAfterPreemption, containerResources);
+Resources.subtractFrom(usageAfterPreemption,
+alreadyConsideringForPreemption);
+
+return usageAfterPreemption;
   }
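
A standalone sketch of the arithmetic above, with invented resource values:
usage 20 GB against a 10 GB fair share, 4 GB already queued for preemption and
8 GB already under consideration in this pass; taking one more 4 GB container
would project usage of 4 GB, below the share, so the check now returns false:

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.Resources;

    public class PreemptionMathSketch {
      public static void main(String[] args) {
        Resource usage = Resource.newInstance(20 * 1024, 20);
        Resource fairShare = Resource.newInstance(10 * 1024, 10);
        Resource queuedForPreemption = Resource.newInstance(4 * 1024, 4);
        Resource alreadyConsidering = Resource.newInstance(8 * 1024, 8);
        Resource candidate = Resource.newInstance(4 * 1024, 4);

        Resource after = Resources.clone(usage);
        Resources.subtractFrom(after, queuedForPreemption);
        Resources.subtractFrom(after, alreadyConsidering);
        Resources.subtractFrom(after, candidate);  // 20 - 4 - 8 - 4 = 4 GB
        boolean belowShare = after.getMemorySize() < fairShare.getMemorySize();
        System.out.println("can preempt candidate: " + !belowShare); // false
      }
    }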
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bde3aed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index b3e59c5..47e580d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ 

[03/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
index 230dbad..02236eb 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -26,6 +26,7 @@ import com.amazonaws.event.ProgressEvent;
 import com.amazonaws.event.ProgressEventType;
 import com.amazonaws.event.ProgressListener;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.s3a.S3ATestUtils;
 import org.junit.FixMethodOrder;
 import org.junit.Test;
 import org.junit.runners.MethodSorters;
@@ -70,16 +71,26 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 
   private int uploadBlockSize = DEFAULT_UPLOAD_BLOCKSIZE;
   private int partitionSize;
+  private long filesize;
 
   @Override
   public void setup() throws Exception {
 super.setup();
-final Path testPath = getTestPath();
-scaleTestDir = new Path(testPath, "scale");
+scaleTestDir = new Path(getTestPath(), getTestSuiteName());
 hugefile = new Path(scaleTestDir, "hugefile");
 hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
+filesize = getTestPropertyBytes(getConf(), KEY_HUGE_FILESIZE,
+DEFAULT_HUGE_FILESIZE);
   }
 
+  /**
+   * Get the name of this test suite, which is used in path generation.
+   * Base implementation uses {@link #getBlockOutputBufferName()} for this.
+   * @return the name of the suite.
+   */
+  public String getTestSuiteName() {
+return getBlockOutputBufferName();
+  }
 
   /**
* Note that this can get called before test setup.
@@ -88,7 +99,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
   @Override
   protected Configuration createScaleConfiguration() {
 Configuration conf = super.createScaleConfiguration();
-partitionSize = (int)getTestPropertyBytes(conf,
+partitionSize = (int) getTestPropertyBytes(conf,
 KEY_HUGE_PARTITION_SIZE,
 DEFAULT_PARTITION_SIZE);
 assertTrue("Partition size too small: " + partitionSize,
@@ -99,6 +110,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 conf.setInt(MULTIPART_SIZE, partitionSize);
 conf.set(USER_AGENT_PREFIX, "STestS3AHugeFileCreate");
 conf.set(FAST_UPLOAD_BUFFER, getBlockOutputBufferName());
+S3ATestUtils.disableFilesystemCaching(conf);
 return conf;
   }
 
@@ -111,17 +123,16 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
   @Test
   public void test_010_CreateHugeFile() throws IOException {
 assertFalse("Please run this test sequentially to avoid timeouts" +
-" and bandwidth problems", isParallelExecution());
-long filesize = getTestPropertyBytes(getConf(), KEY_HUGE_FILESIZE,
-DEFAULT_HUGE_FILESIZE);
+" and bandwidth problems", isParallelExecution());
 long filesizeMB = filesize / _1MB;
 
 // clean up from any previous attempts
 deleteHugeFile();
 
+Path fileToCreate = getPathOfFileToCreate();
 describe("Creating file %s of size %d MB" +
 " with partition size %d buffered by %s",
-hugefile, filesizeMB, partitionSize, getBlockOutputBufferName());
+fileToCreate, filesizeMB, partitionSize, getBlockOutputBufferName());
 
 // now do a check of available upload time, with a pessimistic bandwidth
 // (that of remote upload tests). If the test times out then not only is
@@ -134,7 +145,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 assertTrue(String.format("Timeout set in %s seconds is too low;" +
 " estimating upload time of %d seconds at 1 MB/s." +
 " Rerun tests with -D%s=%d",
-timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
+timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2),
 uploadTime < timeout);
 assertEquals("File size set in " + KEY_HUGE_FILESIZE + " = " + filesize
 + " is not a multiple of " + uploadBlockSize,
@@ -162,7 +173,7 @@ public abstract class AbstractSTestS3AHugeFiles extends 
S3AScaleTestBase {
 S3AInstrumentation.OutputStreamStatistics streamStatistics;
 long blocksPer10MB = blocksPerMB * 10;
 ProgressCallback progress = new ProgressCallback(timer);
-try (FSDataOutputStream out = fs.create(hugefile,
+try (FSDataOutputStream out = fs.create(fileToCreate,
 true,
 uploadBlockSize,
 progress)) {
@@ -219,14 +230,8 @@ public abstract class AbstractSTestS3AHugeFiles 

[21/36] hadoop git commit: YARN-7330. Add support to show GPU in UI including metrics. Contributed by Wangda Tan.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab43959/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
deleted file mode 100644
index 1e8549b..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node.hbs
+++ /dev/null
@@ -1,125 +0,0 @@
-{{!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
---}}
-
-{{breadcrumb-bar breadcrumbs=breadcrumbs}}
-
-
-  
-
-{{node-menu-panel path="yarn-node" nodeId=model.rmNode.id 
nodeAddr=model.node.id}}
-
-
-
-  
-
-  
-  Node Information: 
{{model.rmNode.id}}
-  
-
-
-  Total Vmem allocated for Containers
-  {{divide num=model.node.totalVmemAllocatedContainersMB 
den=1024}} GB
-
-
-  Vmem enforcement enabled
-  {{model.node.vmemCheckEnabled}}
-
-
-  Total Pmem allocated for Containers
-  {{divide num=model.node.totalPmemAllocatedContainersMB 
den=1024}} GB
-
-
-  Pmem enforcement enabled
-  {{model.node.pmemCheckEnabled}}
-
-
-  Total VCores allocated for Containers
-  {{model.node.totalVCoresAllocatedContainers}}
-
-
-  Node Healthy Status
-  {{model.node.nodeHealthy}}
-
-
-  Last Node Health Report Time
-  {{model.node.lastNodeUpdateTime}}
-
-
-  Node Health Report
-  {{model.node.healthReport}}
-
-{{#if model.node.nmStartupTime}}
-  
-Node Manager Start Time
-{{model.node.nmStartupTime}}
-  
-{{/if}}
-
-  Node Manager Version
-  {{model.node.nodeManagerBuildVersion}}
-
-
-  Hadoop Version
-  {{model.node.hadoopBuildVersion}}
-
-
-  
-
-
-  
-
-  
-
-  
-
-  Resource - Memory
-
-
-  {{donut-chart data=model.rmNode.getMemoryDataForDonutChart
-  showLabels=true
-  parentId="mem-donut-chart"
-  ratio=0.6
-  type="memory"
-  colorTargets="good"
-  colorTargetReverse=true
-  maxHeight=350}}
-
-  
-
-
-
-  
-
-  Resource - VCores
-
-
-  {{donut-chart data=model.rmNode.getVCoreDataForDonutChart
-  showLabels=true
-  parentId="vcore-donut-chart"
-  ratio=0.6
-  colorTargets="good"
-  colorTargetReverse=true
-  maxHeight=350}}
-
-  
-
-  
-
-  
-
-{{outlet}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab43959/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
new file mode 100644
index 000..ad411c0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-node/info.hbs
@@ -0,0 +1,154 @@
+{{!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); 

[22/36] hadoop git commit: YARN-7330. Add support to show GPU in UI including metrics. Contributed by Wangda Tan.

2017-11-28 Thread aengineer
YARN-7330. Add support to show GPU in UI including metrics. Contributed by 
Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aab43959
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aab43959
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aab43959

Branch: refs/heads/HDFS-7240
Commit: aab439593b5d3459140b6e104722d4583dfcfddb
Parents: 4cc9479
Author: Sunil G 
Authored: Thu Nov 23 07:54:20 2017 +0530
Committer: Sunil G 
Committed: Thu Nov 23 07:54:20 2017 +0530

--
 .../dev-support/findbugs-exclude.xml|   8 +
 .../hadoop/yarn/api/records/Resource.java   |  21 +++
 .../resources/gpu/GpuResourceAllocator.java |  19 +-
 .../resources/gpu/GpuResourceHandlerImpl.java   |   1 -
 .../resourceplugin/ResourcePlugin.java  |  11 ++
 .../resourceplugin/gpu/AssignedGpuDevice.java   |  79 
 .../resourceplugin/gpu/GpuDevice.java   |   4 +-
 .../resourceplugin/gpu/GpuResourcePlugin.java   |  24 ++-
 .../nodemanager/webapp/NMWebServices.java   |  28 +++
 .../nodemanager/webapp/dao/NMResourceInfo.java  |  28 +++
 .../webapp/dao/gpu/GpuDeviceInformation.java|   2 +-
 .../webapp/dao/gpu/NMGpuResourceInfo.java   |  71 +++
 .../webapp/dao/gpu/PerGpuDeviceInformation.java |   2 +-
 .../webapp/dao/gpu/PerGpuMemoryUsage.java   |   2 +-
 .../resources/gpu/TestGpuResourceHandler.java   |   6 +-
 .../nodemanager/webapp/TestNMWebServices.java   | 185 +++
 .../dao/gpu/TestGpuDeviceInformationParser.java |   2 +-
 .../src/main/webapp/app/adapters/yarn-nm-gpu.js |  33 
 .../main/webapp/app/components/donut-chart.js   |  18 +-
 .../webapp/app/components/gpu-donut-chart.js|  66 +++
 .../src/main/webapp/app/constants.js|  13 ++
 .../webapp/app/controllers/yarn-nodes/table.js  |   2 +-
 .../main/webapp/app/models/cluster-metric.js|  69 +++
 .../src/main/webapp/app/models/yarn-nm-gpu.js   |  27 +++
 .../app/models/yarn-queue/capacity-queue.js |   3 +-
 .../src/main/webapp/app/models/yarn-rm-node.js  |  35 
 .../src/main/webapp/app/router.js   |   5 +-
 .../main/webapp/app/routes/cluster-overview.js  |   2 +-
 .../src/main/webapp/app/routes/yarn-node.js |   2 +
 .../webapp/app/routes/yarn-node/yarn-nm-gpu.js  |  22 +++
 .../main/webapp/app/serializers/yarn-nm-gpu.js  |  43 +
 .../serializers/yarn-queue/capacity-queue.js|   1 +
 .../main/webapp/app/serializers/yarn-rm-node.js |   4 +-
 .../webapp/app/templates/cluster-overview.hbs   |  88 ++---
 .../templates/components/node-menu-panel.hbs|  10 +-
 .../templates/components/yarn-nm-gpu-info.hbs   |  69 +++
 .../src/main/webapp/app/templates/yarn-node.hbs | 125 -
 .../webapp/app/templates/yarn-node/info.hbs | 154 +++
 .../app/templates/yarn-node/yarn-nm-gpu.hbs |  53 ++
 .../src/main/webapp/app/utils/converter.js  |  51 +
 40 files changed, 1181 insertions(+), 207 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab43959/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 670b7ee..de4b0e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -643,4 +643,12 @@
 
 
   
+
+  
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aab43959/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 65b5dce..abd44b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -18,9 +18,14 @@
 
 package org.apache.hadoop.yarn.api.records;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.NotImplementedException;
+import org.apache.curator.shaded.com.google.common.reflect.ClassPath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import 

[01/36] hadoop git commit: YARN-7513. Remove the scheduler lock in FSAppAttempt.getWeight() (Contributed by Wilfred Spiegelenburg)

2017-11-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 b72799a51 -> ac9cc8a8c


YARN-7513. Remove the scheduler lock in FSAppAttempt.getWeight() (Contributed 
by Wilfred Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03c311ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03c311ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03c311ea

Branch: refs/heads/HDFS-7240
Commit: 03c311eae3ad591630a452921172a4406dbda181
Parents: 67bbbe1
Author: yufei 
Authored: Tue Nov 21 10:33:34 2017 -0800
Committer: yufei 
Committed: Tue Nov 21 10:33:34 2017 -0800

--
 .../resourcemanager/scheduler/fair/FSAppAttempt.java  | 14 --
 1 file changed, 4 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03c311ea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 94991eb..e711229 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -1304,20 +1304,14 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
   @Override
   public float getWeight() {
-double weight = 1.0;
+float weight = 1.0F;
 
 if (scheduler.isSizeBasedWeight()) {
-  scheduler.getSchedulerReadLock().lock();
-
-  try {
-// Set weight based on current memory demand
-weight = Math.log1p(getDemand().getMemorySize()) / Math.log(2);
-  } finally {
-scheduler.getSchedulerReadLock().unlock();
-  }
+  // Set weight based on current memory demand
+  weight = (float)(Math.log1p(demand.getMemorySize()) / Math.log(2));
 }
 
-return (float)weight * this.getPriority().getPriority();
+return weight * appPriority.getPriority();
   }
 
   @Override
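
A worked example of the formula above, with an invented demand value: for
4096 MB of memory demand, log1p(4096) / log(2) is about 12.0, which is then
multiplied by the application priority:

    public class WeightSketch {
      public static void main(String[] args) {
        long demandMB = 4096;   // hypothetical current memory demand
        int priority = 1;       // hypothetical application priority
        float weight = (float) (Math.log1p(demandMB) / Math.log(2));
        System.out.printf("weight = %.2f%n", weight * priority); // ~12.00
      }
    }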





[16/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints.
Contributed by Steve Loughran and Ryan Blue.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de8b6ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de8b6ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de8b6ca5

Branch: refs/heads/HDFS-7240
Commit: de8b6ca5ef8614de6d6277b7617e27c788b0555c
Parents: 782ba3b
Author: Steve Loughran 
Authored: Wed Nov 22 15:28:12 2017 +
Committer: Steve Loughran 
Committed: Wed Nov 22 15:28:12 2017 +

--
 .../dev-support/findbugsExcludeFile.xml |7 +
 .../apache/hadoop/fs/FSDataOutputStream.java|9 +
 .../apache/hadoop/fs/PathExistsException.java   |4 +-
 .../org/apache/hadoop/fs/StorageStatistics.java |5 +
 .../apache/hadoop/util/JsonSerialization.java   |  299 +++
 .../src/main/resources/core-default.xml |  117 +-
 .../hadoop/fs/contract/ContractTestUtils.java   |   51 +-
 .../apache/hadoop/test/GenericTestUtils.java|   29 +-
 .../org/apache/hadoop/test/HadoopTestBase.java  |   51 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  144 +-
 .../hadoop/util/TestJsonSerialization.java  |  185 ++
 .../mapreduce/TestMapreduceConfigFields.java|   27 +-
 .../lib/output/BindingPathOutputCommitter.java  |  184 ++
 .../lib/output/FileOutputCommitter.java |   12 +-
 .../lib/output/FileOutputCommitterFactory.java  |   38 +
 .../mapreduce/lib/output/FileOutputFormat.java  |   10 +-
 .../lib/output/NamedCommitterFactory.java   |   79 +
 .../lib/output/PathOutputCommitter.java |   17 +
 .../lib/output/PathOutputCommitterFactory.java  |  204 ++
 .../src/main/resources/mapred-default.xml   |   22 +
 .../lib/output/TestPathOutputCommitter.java |   24 +-
 .../output/TestPathOutputCommitterFactory.java  |  495 +
 hadoop-tools/hadoop-aws/pom.xml |   46 +-
 .../hadoop/fs/s3a/AWSBadRequestException.java   |   42 +
 .../hadoop/fs/s3a/AWSClientIOException.java |3 +-
 .../hadoop/fs/s3a/AWSNoResponseException.java   |   31 +
 .../hadoop/fs/s3a/AWSRedirectException.java |   38 +
 .../fs/s3a/AWSServiceThrottledException.java|   42 +
 .../hadoop/fs/s3a/AWSStatus500Exception.java|   37 +
 .../s3a/BlockingThreadPoolExecutorService.java  |2 +-
 .../org/apache/hadoop/fs/s3a/Constants.java |   72 +-
 .../fs/s3a/InconsistentAmazonS3Client.java  |  232 ++-
 .../java/org/apache/hadoop/fs/s3a/Invoker.java  |  485 +
 .../java/org/apache/hadoop/fs/s3a/Listing.java  |   26 +-
 .../java/org/apache/hadoop/fs/s3a/Retries.java  |   92 +
 .../hadoop/fs/s3a/S3ABlockOutputStream.java |  307 +--
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java |2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  940 +
 .../apache/hadoop/fs/s3a/S3AInputStream.java|   56 +-
 .../hadoop/fs/s3a/S3AInstrumentation.java   |  231 ++-
 .../apache/hadoop/fs/s3a/S3ARetryPolicy.java|  246 +++
 .../hadoop/fs/s3a/S3AStorageStatistics.java |   12 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  324 ++-
 .../org/apache/hadoop/fs/s3a/S3ListRequest.java |   11 +
 .../hadoop/fs/s3a/S3ObjectAttributes.java   |   10 +-
 .../org/apache/hadoop/fs/s3a/Statistic.java |   56 +-
 .../hadoop/fs/s3a/WriteOperationHelper.java |  474 +
 .../fs/s3a/commit/AbstractS3ACommitter.java |  756 +++
 .../s3a/commit/AbstractS3ACommitterFactory.java |   90 +
 .../hadoop/fs/s3a/commit/CommitConstants.java   |  240 +++
 .../hadoop/fs/s3a/commit/CommitOperations.java  |  596 ++
 .../hadoop/fs/s3a/commit/CommitUtils.java   |  129 ++
 .../hadoop/fs/s3a/commit/CommitUtilsWithMR.java |  192 ++
 .../apache/hadoop/fs/s3a/commit/Duration.java   |   60 +
 .../hadoop/fs/s3a/commit/DurationInfo.java  |   59 +
 .../s3a/commit/InternalCommitterConstants.java  |  100 +
 .../hadoop/fs/s3a/commit/LocalTempDir.java  |   80 +
 .../fs/s3a/commit/MagicCommitIntegration.java   |  182 ++
 .../hadoop/fs/s3a/commit/MagicCommitPaths.java  |  229 ++
 .../fs/s3a/commit/PathCommitException.java  |   43 +
 .../apache/hadoop/fs/s3a/commit/PutTracker.java |  100 +
 .../fs/s3a/commit/S3ACommitterFactory.java  |  129 ++
 .../org/apache/hadoop/fs/s3a/commit/Tasks.java  |  410 
 .../hadoop/fs/s3a/commit/ValidationFailure.java |   53 +
 .../hadoop/fs/s3a/commit/files/PendingSet.java  |  192 ++
 .../s3a/commit/files/PersistentCommitData.java  |   69 +
 .../s3a/commit/files/SinglePendingCommit.java   |  432 
 .../hadoop/fs/s3a/commit/files/SuccessData.java |  322 +++
 .../fs/s3a/commit/files/package-info.java   |   45 +
 .../fs/s3a/commit/magic/MagicCommitTracker.java |  161 ++
 .../s3a/commit/magic/MagicS3GuardCommitter.java |  288 +++
 .../magic/MagicS3GuardCommitterFactory.java |   47 +
 

[10/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java
new file mode 100644
index 000..b446f22
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/PartitionedStagingCommitterFactory.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit.staging;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.commit.AbstractS3ACommitterFactory;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;
+
+/**
+ * Factory for the {@link PartitionedStagingCommitter}.
+ */
+public class PartitionedStagingCommitterFactory
+extends AbstractS3ACommitterFactory {
+
+  /**
+   * Name of this class: {@value}.
+   */
+  public static final String CLASSNAME
+  = "org.apache.hadoop.fs.s3a.commit.staging"
+  + ".PartitionedStagingCommitterFactory";
+
+  public PathOutputCommitter createTaskCommitter(S3AFileSystem fileSystem,
+  Path outputPath,
+  TaskAttemptContext context) throws IOException {
+return new PartitionedStagingCommitter(outputPath, context);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
new file mode 100644
index 000..a4d39d7
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.commit.staging;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.UncheckedExecutionException;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsDirectoryException;
+import org.apache.hadoop.fs.s3a.Constants;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
+import static 

[13/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index da1fc5a..ef5a434 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -24,7 +24,12 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricStringBuilder;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -122,8 +127,23 @@ public class S3AInstrumentation {
   STREAM_WRITE_BLOCK_UPLOADS_ABORTED,
   STREAM_WRITE_TOTAL_TIME,
   STREAM_WRITE_TOTAL_DATA,
+  COMMITTER_COMMITS_CREATED,
+  COMMITTER_COMMITS_COMPLETED,
+  COMMITTER_JOBS_SUCCEEDED,
+  COMMITTER_JOBS_FAILED,
+  COMMITTER_TASKS_SUCCEEDED,
+  COMMITTER_TASKS_FAILED,
+  COMMITTER_BYTES_COMMITTED,
+  COMMITTER_BYTES_UPLOADED,
+  COMMITTER_COMMITS_FAILED,
+  COMMITTER_COMMITS_ABORTED,
+  COMMITTER_COMMITS_REVERTED,
+  COMMITTER_MAGIC_FILES_CREATED,
   S3GUARD_METADATASTORE_PUT_PATH_REQUEST,
-  S3GUARD_METADATASTORE_INITIALIZATION
+  S3GUARD_METADATASTORE_INITIALIZATION,
+  S3GUARD_METADATASTORE_RETRY,
+  S3GUARD_METADATASTORE_THROTTLED,
+  STORE_IO_THROTTLED
   };
 
 
@@ -179,8 +199,11 @@ public class S3AInstrumentation {
   gauge(statistic.getSymbol(), statistic.getDescription());
 }
 //todo need a config for the quantiles interval?
+int interval = 1;
 quantiles(S3GUARD_METADATASTORE_PUT_PATH_LATENCY,
-"ops", "latency", 1);
+"ops", "latency", interval);
+quantiles(S3GUARD_METADATASTORE_THROTTLE_RATE,
+"events", "frequency (Hz)", interval);
   }
 
   /**
@@ -372,7 +395,7 @@ public class S3AInstrumentation {
   }
 
   /**
-   * Indicate that S3A deleted one or more file.s
+   * Indicate that S3A deleted one or more files.
* @param count number of files.
*/
   public void fileDeleted(int count) {
@@ -506,6 +529,14 @@ public class S3AInstrumentation {
   }
 
   /**
+   * Create a new instance of the committer statistics.
+   * @return a new committer statistics instance
+   */
+  CommitterStatistics newCommitterStatistics() {
+return new CommitterStatistics();
+  }
+
+  /**
* Merge in the statistics of a single input stream into
* the filesystem-wide statistics.
* @param statistics stream statistics
@@ -584,9 +615,12 @@ public class S3AInstrumentation {
 
 /**
  * The inner stream was opened.
+ * @return the previous count
  */
-public void streamOpened() {
+public long streamOpened() {
+  long count = openOperations;
   openOperations++;
+  return count;
 }
 
 /**
@@ -810,10 +844,13 @@ public class S3AInstrumentation {
 }
 
 /**
- * Note an exception in a multipart complete.
+ * Note exception in a multipart complete.
+ * @param count count of exceptions
  */
-void exceptionInMultipartComplete() {
-  exceptionsInMultipartFinalize.incrementAndGet();
+void exceptionInMultipartComplete(int count) {
+  if (count > 0) {
+exceptionsInMultipartFinalize.addAndGet(count);
+  }
 }
 
 /**
@@ -832,6 +869,15 @@ public class S3AInstrumentation {
 }
 
 /**
+ * Data has been uploaded to be committed in a subsequent operation;
+ * to be called at the end of the write.
+ * @param size size in bytes
+ */
+public void commitUploaded(long size) {
+  incrementCounter(COMMITTER_BYTES_UPLOADED, size);
+}
+
+/**
  * Output stream has closed.
  * Trigger merge in of all statistics not updated during operation.
  */
@@ -918,5 +964,176 @@ public class S3AInstrumentation {
 public void storeClosed() {
 
 }
+
+/**
+ * Throttled request.
+ */
+public void throttled() {
+  incrementCounter(S3GUARD_METADATASTORE_THROTTLED, 1);
+  addValueToQuantiles(S3GUARD_METADATASTORE_THROTTLE_RATE, 1);
+}
+
+/**
+ * S3Guard is retrying after a (retryable) failure.
+ */
+public void retrying() {
+  

[31/36] hadoop git commit: HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when numberOfPagesRemaining is 0. Contributed by Rajesh Balamohan

2017-11-28 Thread aengineer
HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when 
numberOfPagesRemaining is 0.
Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ea182d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ea182d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ea182d0

Branch: refs/heads/HDFS-7240
Commit: 0ea182d0faa35c726dcb37249d48786bfc8ca04c
Parents: 94bed50
Author: Steve Loughran 
Authored: Tue Nov 28 11:52:59 2017 +
Committer: Steve Loughran 
Committed: Tue Nov 28 11:52:59 2017 +

--
 .../java/org/apache/hadoop/fs/azure/PageBlobInputStream.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ea182d0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 097201b..aaac490 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -343,9 +343,9 @@ final class PageBlobInputStream extends InputStream {
 
 // Skip over whole pages as necessary without retrieving them from the
 // server.
-long pagesToSkipOver = Math.min(
+long pagesToSkipOver = Math.max(0, Math.min(
 n / PAGE_DATA_SIZE,
-numberOfPagesRemaining - 1);
+numberOfPagesRemaining - 1));
 numberOfPagesRemaining -= pagesToSkipOver;
 currentOffsetInBlob += pagesToSkipOver * PAGE_SIZE;
 skipped += pagesToSkipOver * PAGE_DATA_SIZE;
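
A tiny reproduction of the failure mode the Math.max(0, ...) clamp fixes;
the constants are illustrative, not Azure's real page sizes:

    public class SkipClampSketch {
      public static void main(String[] args) {
        long PAGE_DATA_SIZE = 510;
        long n = 1024;                    // caller asks to skip 1 KB
        long numberOfPagesRemaining = 0;  // already on the last page

        long unclamped = Math.min(n / PAGE_DATA_SIZE, numberOfPagesRemaining - 1);
        long clamped = Math.max(0,
            Math.min(n / PAGE_DATA_SIZE, numberOfPagesRemaining - 1));
        System.out.println(unclamped); // -1: skip() would report negative progress
        System.out.println(clamped);   // 0: skip() never goes backwards
      }
    }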





[24/36] hadoop git commit: HADOOP-15067. GC time percentage reported in JvmMetrics should be a gauge, not counter. Contributed by Misha Dmitriev.

2017-11-28 Thread aengineer
HADOOP-15067. GC time percentage reported in JvmMetrics should be a gauge, not 
counter. Contributed by Misha Dmitriev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d162252d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d162252d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d162252d

Branch: refs/heads/HDFS-7240
Commit: d162252d7a7223631ff66ba0210953296407e55f
Parents: b46ca7e
Author: Xiao Chen 
Authored: Thu Nov 23 09:00:59 2017 -0800
Committer: Xiao Chen 
Committed: Thu Nov 23 09:01:28 2017 -0800

--
 .../main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java| 2 +-
 .../java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d162252d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
index 8c3375f..5f9afdd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
@@ -188,7 +188,7 @@ public class JvmMetrics implements MetricsSource {
 }
 
 if (gcTimeMonitor != null) {
-  rb.addCounter(GcTimePercentage,
+  rb.addGauge(GcTimePercentage,
   gcTimeMonitor.getLatestGcData().getGcTimePercentage());
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d162252d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
index 5320b6e..aa1b009 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java
@@ -101,7 +101,7 @@ public class TestJvmMetrics {
 verify(rb).tag(SessionId, "test");
 for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
   if (info.name().equals("GcTimePercentage")) {
-verify(rb).addCounter(eq(info), anyInt());
+verify(rb).addGauge(eq(info), anyInt());
   }
 }
   }
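
The type distinction behind the one-word change: a counter is assumed to be monotonically non-decreasing, so downstream aggregators derive rates from its deltas, while a gauge is a point-in-time reading that may rise and fall, which is exactly what a percentage is. A minimal sketch against the metrics2 API, with illustrative metric names:

import static org.apache.hadoop.metrics2.lib.Interns.info;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;

class GaugeVsCounterSketch {
  void sample(MetricsRecordBuilder rb, long gcCount, int gcTimePercentage) {
    // Counter: only ever grows; consumers compute rates from successive deltas.
    rb.addCounter(info("GcCount", "Collections since JVM start"), gcCount);
    // Gauge: instantaneous value that can go back down, e.g. a percentage.
    rb.addGauge(info("GcTimePercentage",
        "Share of recent wall-clock time spent in GC"), gcTimePercentage);
  }
}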





[07/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java
new file mode 100644
index 000..55e3e37
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java
@@ -0,0 +1,322 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import java.io.IOException;
+import java.net.URI;
+
+import com.amazonaws.AmazonClientException;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase;
+import org.apache.hadoop.util.Progressable;
+
+/**
+ * Relays FS calls to the mocked FS, allowing extra logging with
+ * stack traces to be included and stubbing out other methods
+ * where needed to avoid failures.
+ *
+ * The logging is useful for tracking down why there are more calls
+ * to a method than a test would expect: changes in implementation
+ * details often trigger such false-positive test failures.
+ *
+ * This class is in the s3a package so that it has access to
+ * package-private methods.
+ */
+public class MockS3AFileSystem extends S3AFileSystem {
+  public static final String BUCKET = "bucket-name";
+  public static final URI FS_URI = URI.create("s3a://" + BUCKET + "/");
+  protected static final Logger LOG =
+  LoggerFactory.getLogger(MockS3AFileSystem.class);
+
+  private final S3AFileSystem mock;
+  private final Pair outcome;
+
+  /** Log nothing: {@value}. */
+  public static final int LOG_NONE = 0;
+
+  /** Log the name of the operation and any arguments: {@value}.  */
+  public static final int LOG_NAME = 1;
+
+  /** Log the entire stack of where operations are called: {@value}.  */
+  public static final int LOG_STACK = 2;
+
+  /**
+   * This can be edited to set the log level of events through the
+   * mock FS.
+   */
+  private int logEvents = LOG_NAME;
+  private final S3AInstrumentation instrumentation =
+  new S3AInstrumentation(FS_URI);
+  private Configuration conf;
+
+  public MockS3AFileSystem(S3AFileSystem mock,
+  Pair 
outcome) {
+this.mock = mock;
+this.outcome = outcome;
+setUri(FS_URI);
+setBucket(BUCKET);
+  }
+
+  public Pair
+  getOutcome() {
+return outcome;
+  }
+
+  public int getLogEvents() {
+return logEvents;
+  }
+
+  public void setLogEvents(int logEvents) {
+this.logEvents = logEvents;
+  }
+
+  private void event(String format, Object... args) {
+Throwable ex = null;
+String s = String.format(format, args);
+switch (logEvents) {
+case LOG_STACK:
+  ex = new Exception(s);
+/* fall through */
+case LOG_NAME:
+  LOG.info(s, ex);
+  break;
+case LOG_NONE:
+default:
+  //nothing
+}
+  }
+
+  @Override
+  public Path getWorkingDirectory() {
+return new Path("s3a://" + BUCKET + "/work");
+  }
+
+  @Override
+  public void initialize(URI name, Configuration originalConf)
+  throws IOException {
+conf = originalConf;
+  }
+
+  @Override
+  public Configuration getConf() {
+return conf;
+  }
+
+  @Override
+  public boolean isMagicCommitEnabled() {
+return true;
+  }
+
+  /**
+   * Make operation to set the s3 client 
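
For orientation, a minimal sketch of how a relay mock like this is typically wired up in a test. Mockito is assumed on the classpath, and the Pair payload is left null here because the generic parameters were lost in the archived diff:

import static org.mockito.Mockito.mock;

import org.apache.commons.lang3.tuple.Pair;

class MockWiringSketch {
  MockS3AFileSystem newFs() {
    S3AFileSystem inner = mock(S3AFileSystem.class); // records interactions
    MockS3AFileSystem fs = new MockS3AFileSystem(inner, Pair.of(null, null));
    fs.setLogEvents(MockS3AFileSystem.LOG_STACK);    // trace unexpected calls
    return fs;
  }
}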

[14/36] hadoop git commit: HADOOP-13786 Add S3A committer for zero-rename commits to S3 endpoints. Contributed by Steve Loughran and Ryan Blue.

2017-11-28 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de8b6ca5/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index f846689..96de8e4 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -22,17 +22,16 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.Callable;
+import java.util.Locale;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.event.ProgressEvent;
 import com.amazonaws.event.ProgressEventType;
 import com.amazonaws.event.ProgressListener;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
 import com.amazonaws.services.s3.model.PartETag;
 import com.amazonaws.services.s3.model.PutObjectRequest;
 import com.amazonaws.services.s3.model.PutObjectResult;
@@ -47,8 +46,9 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.fs.s3a.commit.CommitConstants;
+import org.apache.hadoop.fs.s3a.commit.PutTracker;
 import org.apache.hadoop.util.Progressable;
 
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
@@ -65,7 +65,8 @@ import static org.apache.hadoop.fs.s3a.Statistic.*;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-class S3ABlockOutputStream extends OutputStream {
+class S3ABlockOutputStream extends OutputStream implements
+StreamCapabilities {
 
   private static final Logger LOG =
   LoggerFactory.getLogger(S3ABlockOutputStream.class);
@@ -87,14 +88,6 @@ class S3ABlockOutputStream extends OutputStream {
   private final ListeningExecutorService executorService;
 
   /**
-   * Retry policy for multipart commits; not all AWS SDK versions retry that.
-   */
-  private final RetryPolicy retryPolicy =
-  RetryPolicies.retryUpToMaximumCountWithProportionalSleep(
-  5,
-  2000,
-  TimeUnit.MILLISECONDS);
-  /**
* Factory for blocks.
*/
   private final S3ADataBlocks.BlockFactory blockFactory;
@@ -120,7 +113,12 @@ class S3ABlockOutputStream extends OutputStream {
   /**
* Write operation helper; encapsulation of the filesystem operations.
*/
-  private final S3AFileSystem.WriteOperationHelper writeOperationHelper;
+  private final WriteOperationHelper writeOperationHelper;
+
+  /**
+   * Track multipart put operation.
+   */
+  private final PutTracker putTracker;
 
   /**
* An S3A output stream which uploads partitions in a separate pool of
@@ -138,6 +136,7 @@ class S3ABlockOutputStream extends OutputStream {
* @param blockFactory factory for creating stream destinations
* @param statistics stats for this stream
* @param writeOperationHelper state of the write operation.
+   * @param putTracker put tracking for commit support
* @throws IOException on any problem
*/
   S3ABlockOutputStream(S3AFileSystem fs,
@@ -147,7 +146,8 @@ class S3ABlockOutputStream extends OutputStream {
   long blockSize,
   S3ADataBlocks.BlockFactory blockFactory,
   S3AInstrumentation.OutputStreamStatistics statistics,
-  S3AFileSystem.WriteOperationHelper writeOperationHelper)
+  WriteOperationHelper writeOperationHelper,
+  PutTracker putTracker)
   throws IOException {
 this.fs = fs;
 this.key = key;
@@ -155,6 +155,7 @@ class S3ABlockOutputStream extends OutputStream {
 this.blockSize = (int) blockSize;
 this.statistics = statistics;
 this.writeOperationHelper = writeOperationHelper;
+this.putTracker = putTracker;
 Preconditions.checkArgument(blockSize >= Constants.MULTIPART_MIN_SIZE,
 "Block size is too small: %d", blockSize);
 this.executorService = MoreExecutors.listeningDecorator(executorService);
@@ -166,7 +167,11 @@ class S3ABlockOutputStream extends OutputStream {
 // writes a 0-byte entry.
 createBlockIfNeeded();
 LOG.debug("Initialized S3ABlockOutputStream for {}" +
-" output to {}", writeOperationHelper, activeBlock);
+" output to {}", key, activeBlock);
+if (putTracker.initialize()) {
+  LOG.debug("Put tracker requests multipart upload");
+  
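
The StreamCapabilities interface adopted here lets callers probe a stream for optional behaviour instead of instanceof-checking concrete classes. A minimal sketch of such a probe; the capability key below is a placeholder, not the actual constant defined in CommitConstants:

import java.io.OutputStream;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.StreamCapabilities;

class CapabilityProbeSketch {
  boolean supportsMagicCommit(FSDataOutputStream out) {
    OutputStream inner = out.getWrappedStream();
    // hasCapability() keys are free-form strings; this one is illustrative.
    return inner instanceof StreamCapabilities
        && ((StreamCapabilities) inner).hasCapability("s3a:magic-committer");
  }
}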

hadoop git commit: HDFS-12850. Ozone: TestContainerPersistence#testListContainer fails frequently due to timing out. Contributed by Yiqun Lin.

2017-11-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ffccb1900 -> b72799a51


HDFS-12850. Ozone: TestContainerPersistence#testListContainer fails frequently 
due to timing out. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b72799a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b72799a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b72799a5

Branch: refs/heads/HDFS-7240
Commit: b72799a510ec956871feacce2931685c0d0b113d
Parents: ffccb19
Author: Anu Engineer 
Authored: Tue Nov 28 11:54:45 2017 -0800
Committer: Anu Engineer 
Committed: Tue Nov 28 11:54:45 2017 -0800

--
 .../ozone/container/common/impl/TestContainerPersistence.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b72799a5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 16f6e68..06d8062 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -322,14 +322,14 @@ public class TestContainerPersistence {
   }
 
   /**
-   * This test creates 1000 containers and reads them back 5 containers at a
+   * This test creates 50 containers and reads them back 5 containers at a
* time and verifies that we did get back all containers.
*
* @throws IOException
*/
   @Test
   public void testListContainer() throws IOException {
-final int count = 1000;
+final int count = 50;
 final int step = 5;
 
 Map testMap = new HashMap<>();
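
The verification idea in the test is plain paging: walk the store in fixed steps and confirm the union of the pages covers everything that was created. A self-contained sketch with a list standing in for the container store; all names are illustrative:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class PagingSketch {
  public static void main(String[] args) {
    final int count = 50, step = 5;
    List<String> store = new ArrayList<>();
    for (int i = 0; i < count; i++) {
      store.add("container-" + i);
    }
    Set<String> seen = new HashSet<>();
    for (int from = 0; from < count; from += step) {
      // Each page returns at most `step` entries starting at `from`.
      seen.addAll(store.subList(from, Math.min(from + step, count)));
    }
    System.out.println(seen.size() == count); // true: paging saw everything
  }
}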





hadoop git commit: HDFS-12864. Ozone: Turn off fully synced writes in RocksDB MetadataStore. Contributed by Elek, Marton.

2017-11-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 7f801cc88 -> ffccb1900


HDFS-12864. Ozone: Turn off fully synced writes in RocksDB MetadataStore. 
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffccb190
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffccb190
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffccb190

Branch: refs/heads/HDFS-7240
Commit: ffccb190074782ef5d2b896063639c4de62aca84
Parents: 7f801cc
Author: Anu Engineer 
Authored: Tue Nov 28 11:04:38 2017 -0800
Committer: Anu Engineer 
Committed: Tue Nov 28 11:04:38 2017 -0800

--
 .../src/main/java/org/apache/hadoop/utils/RocksDBStore.java| 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffccb190/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
index aa957fa..c569989 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
@@ -67,8 +67,6 @@ public class RocksDBStore implements MetadataStore {
 dbOptions = options;
 dbLocation = dbFile;
 writeOptions = new WriteOptions();
-writeOptions.setSync(true);
-writeOptions.setNoSlowdown(true);
 try {
 
   db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
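
The two removed options trade durability for throughput rather than correctness: setSync(true) forces an fsync of the write-ahead log on every write, and setNoSlowdown(true) makes writes fail fast instead of stalling under compaction pressure. A minimal sketch of the trade-off against the rocksdbjni API, with an illustrative database path:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

class SyncTradeoffSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options opts = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(opts, "/tmp/sketch-db");
         WriteOptions fast = new WriteOptions();              // sync is off by default
         WriteOptions durable = new WriteOptions().setSync(true)) {
      // WAL-only write: survives a process crash, may lose data on power loss.
      db.put(fast, "k1".getBytes(), "v1".getBytes());
      // fsyncs the WAL before returning: durable, but far slower per write.
      db.put(durable, "k2".getBytes(), "v2".getBytes());
    }
  }
}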





hadoop git commit: HDFS-12857. StoragePolicyAdmin should support scheme-based path. Contributed by Surendra Singh Lilhore.

2017-11-28 Thread surendralilhore
Repository: hadoop
Updated Branches:
  refs/heads/trunk a2c7a73e3 -> 30941d99c


HDFS-12857. StoragePolicyAdmin should support scheme-based path. Contributed by 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30941d99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30941d99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30941d99

Branch: refs/heads/trunk
Commit: 30941d99c9014431981eeb09ab24e90bef629fee
Parents: a2c7a73
Author: Surendra Singh Lilhore 
Authored: Tue Nov 28 23:57:03 2017 +0530
Committer: Surendra Singh Lilhore 
Committed: Tue Nov 28 23:57:03 2017 +0530

--
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  6 ++--
 .../tools/TestViewFSStoragePolicyCommands.java  | 37 ++--
 2 files changed, 38 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30941d99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index d5e5b4d..aeb10d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -153,7 +153,7 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
   }
 
   Path p = new Path(path);
-  final FileSystem fs = FileSystem.get(conf);
+  final FileSystem fs = FileSystem.get(p.toUri(), conf);
   try {
 FileStatus status;
 try {
@@ -233,7 +233,7 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
 return 1;
   }
   Path p = new Path(path);
-  final FileSystem fs = FileSystem.get(conf);
+  final FileSystem fs = FileSystem.get(p.toUri(), conf);
   try {
 fs.setStoragePolicy(p, policyName);
 System.out.println("Set storage policy " + policyName + " on " + path);
@@ -279,7 +279,7 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
   }
 
   Path p = new Path(path);
-  final FileSystem fs = FileSystem.get(conf);
+  final FileSystem fs = FileSystem.get(p.toUri(), conf);
   try {
 fs.unsetStoragePolicy(p);
 System.out.println("Unset storage policy from " + path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30941d99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
index b3bb3c4..3a94959 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
@@ -21,18 +21,19 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FsConstants;
-
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 
 /**
  * Test StoragePolicyAdmin commands with ViewFileSystem.
@@ -77,4 +78,36 @@ public class TestViewFSStoragePolicyCommands extends 
TestStoragePolicyCommands {
 DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /", 2,
 "is not supported for filesystem viewfs on path /");
   }
+
+  @Test
+  public void testStoragePolicyCommandPathWithSchema() throws Exception {
+Path base1 = new Path("/user1");
+final Path bar = new Path(base1, "bar");
+DFSTestUtil.createFile(cluster.getFileSystem(0), bar, 1024, (short) 1, 0);
+
+// Test with hdfs:// schema
+String pathHdfsSchema = "hdfs://"
++ 

hadoop git commit: YARN-6647. RM can crash during transitionToStandby due to InterruptedException. Contributed by Bibin A Chundatt

2017-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 b9239e0b7 -> e03725c26


YARN-6647. RM can crash during transitionToStandby due to InterruptedException. 
Contributed by Bibin A Chundatt

(cherry picked from commit a2c7a73e33045ce42cce19aacbe45c0421a61994)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e03725c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e03725c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e03725c2

Branch: refs/heads/branch-3.0.0
Commit: e03725c262aac44dcf7d42432f0b5e35ff8ff005
Parents: b9239e0
Author: Jason Lowe 
Authored: Tue Nov 28 11:10:18 2017 -0600
Committer: Jason Lowe 
Committed: Tue Nov 28 11:26:47 2017 -0600

--
 .../RMDelegationTokenSecretManager.java | 42 ++--
 1 file changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e03725c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
index 53cc471..37cd741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
@@ -82,14 +82,21 @@ public class RMDelegationTokenSecretManager extends
 return new RMDelegationTokenIdentifier();
   }
 
+  private boolean shouldIgnoreException(Exception e) {
+return !running && e.getCause() instanceof InterruptedException;
+  }
+
   @Override
   protected void storeNewMasterKey(DelegationKey newKey) {
 try {
   LOG.info("storing master key with keyID " + newKey.getKeyId());
   rm.getRMContext().getStateStore().storeRMDTMasterKey(newKey);
 } catch (Exception e) {
-  LOG.error("Error in storing master key with KeyID: " + 
newKey.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error(
+"Error in storing master key with KeyID: " + newKey.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -99,8 +106,10 @@ public class RMDelegationTokenSecretManager extends
   LOG.info("removing master key with keyID " + key.getKeyId());
   rm.getRMContext().getStateStore().removeRMDTMasterKey(key);
 } catch (Exception e) {
-  LOG.error("Error in removing master key with KeyID: " + key.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in removing master key with KeyID: " + 
key.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -113,9 +122,11 @@ public class RMDelegationTokenSecretManager extends
   rm.getRMContext().getStateStore().storeRMDelegationToken(identifier,
   renewDate);
 } catch (Exception e) {
-  LOG.error("Error in storing RMDelegationToken with sequence number: "
-  + identifier.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in storing RMDelegationToken with sequence number: "
++ identifier.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -127,9 +138,11 @@ public class RMDelegationTokenSecretManager extends
   + id.getSequenceNumber());
   rm.getRMContext().getStateStore().updateRMDelegationToken(id, renewDate);
 } catch (Exception e) {
-  LOG.error("Error in updating persisted RMDelegationToken" +
-" with sequence number: " + id.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in updating persisted RMDelegationToken"
++ " with sequence number: " + id.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -141,9 +154,12 @@ public class RMDelegationTokenSecretManager extends
   + ident.getSequenceNumber());
   rm.getRMContext().getStateStore().removeRMDelegationToken(ident);
 } catch (Exception e) {
-  

hadoop git commit: YARN-6647. RM can crash during transitionToStandby due to InterruptedException. Contributed by Bibin A Chundatt

2017-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 8bf74f325 -> a4f1e3036


YARN-6647. RM can crash during transitionToStandby due to InterruptedException. 
Contributed by Bibin A Chundatt

(cherry picked from commit a2c7a73e33045ce42cce19aacbe45c0421a61994)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4f1e303
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4f1e303
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4f1e303

Branch: refs/heads/branch-3.0
Commit: a4f1e3036d031ff970b863ee0b12b92fba079036
Parents: 8bf74f3
Author: Jason Lowe 
Authored: Tue Nov 28 11:10:18 2017 -0600
Committer: Jason Lowe 
Committed: Tue Nov 28 11:19:43 2017 -0600

--
 .../RMDelegationTokenSecretManager.java | 42 ++--
 1 file changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f1e303/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
index 53cc471..37cd741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
@@ -82,14 +82,21 @@ public class RMDelegationTokenSecretManager extends
 return new RMDelegationTokenIdentifier();
   }
 
+  private boolean shouldIgnoreException(Exception e) {
+return !running && e.getCause() instanceof InterruptedException;
+  }
+
   @Override
   protected void storeNewMasterKey(DelegationKey newKey) {
 try {
   LOG.info("storing master key with keyID " + newKey.getKeyId());
   rm.getRMContext().getStateStore().storeRMDTMasterKey(newKey);
 } catch (Exception e) {
-  LOG.error("Error in storing master key with KeyID: " + 
newKey.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error(
+"Error in storing master key with KeyID: " + newKey.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -99,8 +106,10 @@ public class RMDelegationTokenSecretManager extends
   LOG.info("removing master key with keyID " + key.getKeyId());
   rm.getRMContext().getStateStore().removeRMDTMasterKey(key);
 } catch (Exception e) {
-  LOG.error("Error in removing master key with KeyID: " + key.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in removing master key with KeyID: " + 
key.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -113,9 +122,11 @@ public class RMDelegationTokenSecretManager extends
   rm.getRMContext().getStateStore().storeRMDelegationToken(identifier,
   renewDate);
 } catch (Exception e) {
-  LOG.error("Error in storing RMDelegationToken with sequence number: "
-  + identifier.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in storing RMDelegationToken with sequence number: "
++ identifier.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -127,9 +138,11 @@ public class RMDelegationTokenSecretManager extends
   + id.getSequenceNumber());
   rm.getRMContext().getStateStore().updateRMDelegationToken(id, renewDate);
 } catch (Exception e) {
-  LOG.error("Error in updating persisted RMDelegationToken" +
-" with sequence number: " + id.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in updating persisted RMDelegationToken"
++ " with sequence number: " + id.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -141,9 +154,12 @@ public class RMDelegationTokenSecretManager extends
   + ident.getSequenceNumber());
   rm.getRMContext().getStateStore().removeRMDelegationToken(ident);
 } catch (Exception e) {
-  

hadoop git commit: YARN-6647. RM can crash during transitionToStandby due to InterruptedException. Contributed by Bibin A Chundatt

2017-11-28 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6b76695f8 -> a2c7a73e3


YARN-6647. RM can crash during transitionToStandby due to InterruptedException. 
Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2c7a73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2c7a73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2c7a73e

Branch: refs/heads/trunk
Commit: a2c7a73e33045ce42cce19aacbe45c0421a61994
Parents: 6b76695
Author: Jason Lowe 
Authored: Tue Nov 28 11:10:18 2017 -0600
Committer: Jason Lowe 
Committed: Tue Nov 28 11:15:44 2017 -0600

--
 .../RMDelegationTokenSecretManager.java | 42 ++--
 1 file changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2c7a73e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
index 53cc471..37cd741 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMDelegationTokenSecretManager.java
@@ -82,14 +82,21 @@ public class RMDelegationTokenSecretManager extends
 return new RMDelegationTokenIdentifier();
   }
 
+  private boolean shouldIgnoreException(Exception e) {
+return !running && e.getCause() instanceof InterruptedException;
+  }
+
   @Override
   protected void storeNewMasterKey(DelegationKey newKey) {
 try {
   LOG.info("storing master key with keyID " + newKey.getKeyId());
   rm.getRMContext().getStateStore().storeRMDTMasterKey(newKey);
 } catch (Exception e) {
-  LOG.error("Error in storing master key with KeyID: " + 
newKey.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error(
+"Error in storing master key with KeyID: " + newKey.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -99,8 +106,10 @@ public class RMDelegationTokenSecretManager extends
   LOG.info("removing master key with keyID " + key.getKeyId());
   rm.getRMContext().getStateStore().removeRMDTMasterKey(key);
 } catch (Exception e) {
-  LOG.error("Error in removing master key with KeyID: " + key.getKeyId());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in removing master key with KeyID: " + 
key.getKeyId());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -113,9 +122,11 @@ public class RMDelegationTokenSecretManager extends
   rm.getRMContext().getStateStore().storeRMDelegationToken(identifier,
   renewDate);
 } catch (Exception e) {
-  LOG.error("Error in storing RMDelegationToken with sequence number: "
-  + identifier.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in storing RMDelegationToken with sequence number: "
++ identifier.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -127,9 +138,11 @@ public class RMDelegationTokenSecretManager extends
   + id.getSequenceNumber());
   rm.getRMContext().getStateStore().updateRMDelegationToken(id, renewDate);
 } catch (Exception e) {
-  LOG.error("Error in updating persisted RMDelegationToken" +
-" with sequence number: " + id.getSequenceNumber());
-  ExitUtil.terminate(1, e);
+  if (!shouldIgnoreException(e)) {
+LOG.error("Error in updating persisted RMDelegationToken"
++ " with sequence number: " + id.getSequenceNumber());
+ExitUtil.terminate(1, e);
+  }
 }
   }
 
@@ -141,9 +154,12 @@ public class RMDelegationTokenSecretManager extends
   + ident.getSequenceNumber());
   rm.getRMContext().getStateStore().removeRMDelegationToken(ident);
 } catch (Exception e) {
-  LOG.error("Error in removing RMDelegationToken with sequence number: "
-  + 
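
Across all three branches the shape of the fix is identical: terminate the RM only when a state-store failure is genuine, and swallow it when the write was interrupted because the secret manager is already shutting down for a standby transition. A minimal sketch of the guard in isolation, with a stand-in lifecycle flag and a generic fatal handler in place of ExitUtil:

class ShutdownGuardSketch {
  private volatile boolean running = true; // stand-in for the manager's flag

  private boolean shouldIgnoreException(Exception e) {
    // Ignore only failures that are (a) during shutdown and (b) caused by an
    // interrupt; anything else remains fatal.
    return !running && e.getCause() instanceof InterruptedException;
  }

  void persist(Runnable stateStoreWrite) {
    try {
      stateStoreWrite.run();
    } catch (Exception e) {
      if (!shouldIgnoreException(e)) {
        // Stand-in for ExitUtil.terminate(1, e) in the real secret manager.
        throw new IllegalStateException("state store write failed", e);
      }
    }
  }

  void stop() {
    running = false; // flip before interrupting in-flight store operations
  }
}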

hadoop git commit: YARN-7480. Render tooltips on columns where text is clipped in new YARN UI. Contributed by Vasudevan Skm. This closes #293

2017-11-28 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 641ba5c7a -> 6b76695f8


YARN-7480. Render tooltips on columns where text is clipped in new YARN UI. 
Contributed by Vasudevan Skm. This closes #293


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b76695f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b76695f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b76695f

Branch: refs/heads/trunk
Commit: 6b76695f886d4db7287a0425d56d5e13daf5d08d
Parents: 641ba5c
Author: Sunil G 
Authored: Tue Nov 28 22:41:52 2017 +0530
Committer: Sunil G 
Committed: Tue Nov 28 22:41:52 2017 +0530

--
 .../app/components/em-table-tooltip-text.js | 33 +++
 .../webapp/app/controllers/app-table-columns.js |  4 ++
 .../components/em-table-tooltip-text.hbs| 26 
 .../components/em-table-tooltip-text-test.js| 43 
 4 files changed, 106 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
new file mode 100644
index 000..f363460
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-tooltip-text.js
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ import Ember from 'ember';
+
+export default Ember.Component.extend({
+  content: null,
+
+  classNames: ["em-table-text-with-tooltip"],
+
+  didRender: function() {
+this.$().parent().css("position", "static");
+  },
+
+  tooltipText: Ember.computed("content", function () {
+return this.get("content");
+  }),
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index a87acc1..fb002f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -50,6 +50,7 @@ export default Ember.Controller.extend({
   }, {
   id: 'appName',
   headerTitle: 'Application Name',
+  cellComponentName: 'em-table-tooltip-text',
   contentPath: 'appName',
   facetType: null,
   }, {
@@ -66,6 +67,7 @@ export default Ember.Controller.extend({
   }, {
   id: 'queue',
   headerTitle: 'Queue',
+  cellComponentName: 'em-table-tooltip-text',
   contentPath: 'queue',
   }, {
   id: 'progress',
@@ -128,6 +130,7 @@ export default Ember.Controller.extend({
   headerTitle: 'Application ID',
   contentPath: 'id',
   facetType: null,
+  cellComponentName: 'em-table-tooltip-text',
   minWidth: "250px"
 }, {
   id: 'state',
@@ -160,6 +163,7 @@ export default Ember.Controller.extend({
 id: 'queue',
 headerTitle: 'Queue',
 contentPath: 'queue',
+cellComponentName: 'em-table-tooltip-text',
 }, {
   id: 'stTime',
   headerTitle: 'Started Time',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b76695f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/em-table-tooltip-text.hbs
--
diff --git 

hadoop git commit: YARN-7499. Layout changes to Application details page in new YARN UI. Contributed by Vasudevan Skm.

2017-11-28 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0ea182d0f -> 641ba5c7a


YARN-7499. Layout changes to Application details page in new YARN UI. 
Contributed by Vasudevan Skm.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/641ba5c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/641ba5c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/641ba5c7

Branch: refs/heads/trunk
Commit: 641ba5c7a1471f8d799b1f919cd41daffb9da84e
Parents: 0ea182d
Author: Sunil G 
Authored: Tue Nov 28 18:37:11 2017 +0530
Committer: Sunil G 
Committed: Tue Nov 28 18:37:11 2017 +0530

--
 .../webapp/app/controllers/app-table-columns.js |   4 +-
 .../src/main/webapp/app/controllers/yarn-app.js |  69 -
 .../webapp/app/controllers/yarn-flowrun/info.js |   2 +-
 .../src/main/webapp/app/models/yarn-app.js  |   6 +-
 .../src/main/webapp/app/router.js   |  12 +-
 .../src/main/webapp/app/routes/yarn-app.js  |  23 +-
 .../main/webapp/app/routes/yarn-app/attempts.js |  15 +-
 .../main/webapp/app/routes/yarn-app/charts.js   |  18 +-
 .../webapp/app/routes/yarn-app/components.js|  16 +-
 .../main/webapp/app/routes/yarn-app/configs.js  |  16 +-
 .../src/main/webapp/app/routes/yarn-app/info.js |  17 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   2 +-
 .../src/main/webapp/app/styles/app.scss |  24 ++
 .../src/main/webapp/app/styles/colors.scss  |   2 +
 .../src/main/webapp/app/styles/layout.scss  |  42 +++
 .../src/main/webapp/app/styles/variables.scss   |   4 +
 .../src/main/webapp/app/styles/yarn-app.scss|  35 +++
 .../app/templates/components/timeline-view.hbs  |   2 +-
 .../src/main/webapp/app/templates/yarn-app.hbs  | 149 +++---
 .../webapp/app/templates/yarn-app/attempts.hbs  |   2 +-
 .../webapp/app/templates/yarn-app/charts.hbs|  46 ++-
 .../app/templates/yarn-app/components.hbs   |   6 +-
 .../webapp/app/templates/yarn-app/configs.hbs   |  58 ++--
 .../main/webapp/app/templates/yarn-app/info.hbs | 281 +--
 .../webapp/app/templates/yarn-app/loading.hbs   |   2 +-
 .../main/webapp/app/templates/yarn-services.hbs |   2 +-
 26 files changed, 518 insertions(+), 337 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/641ba5c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index 05bfad45..a87acc1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -39,7 +39,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.id,
-  href: `#/yarn-app/${row.id}/info`
+  href: `#/yarn-app/${row.id}/attempts`
 };
   }
   }, {
@@ -120,7 +120,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('appName'),
-  href: `#/yarn-app/${row.id}/info?service=${row.get('appName')}`
+  href: `#/yarn-app/${row.id}/attempts?service=${row.get('appName')}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/641ba5c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index c40697f..b84f328 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -32,6 +32,65 @@ export default Ember.Controller.extend({
 text: 'App'
   }],
 
+  actions: {
+showStopServiceConfirm() {
+  this.set('actionResponse', null);
+  Ember.$("#stopServiceConfirmDialog").modal('show');
+},
+
+stopService() {
+  var self = this;
+  Ember.$("#stopServiceConfirmDialog").modal('hide');
+  var adapter = this.store.adapterFor('yarn-servicedef');
+  self.set('isLoading', true);
+  adapter.stopService(this.model.serviceName).then(function () {
+self.set('actionResponse', { msg: 'Service 

[2/3] hadoop git commit: HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when numberOfPagesRemaining is 0. Contributed by Rajesh Balamohan

2017-11-28 Thread stevel
HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when 
numberOfPagesRemaining is 0.
Contributed by Rajesh Balamohan

(cherry picked from commit 0ea182d0faa35c726dcb37249d48786bfc8ca04c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bf74f32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bf74f32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bf74f32

Branch: refs/heads/branch-3.0
Commit: 8bf74f325a6fa6194c00cf42c107d87ce36b0465
Parents: 72d0be9
Author: Steve Loughran 
Authored: Tue Nov 28 11:57:12 2017 +
Committer: Steve Loughran 
Committed: Tue Nov 28 11:57:12 2017 +

--
 .../java/org/apache/hadoop/fs/azure/PageBlobInputStream.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf74f32/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 097201b..aaac490 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -343,9 +343,9 @@ final class PageBlobInputStream extends InputStream {
 
 // Skip over whole pages as necessary without retrieving them from the
 // server.
-long pagesToSkipOver = Math.min(
+long pagesToSkipOver = Math.max(0, Math.min(
 n / PAGE_DATA_SIZE,
-numberOfPagesRemaining - 1);
+numberOfPagesRemaining - 1));
 numberOfPagesRemaining -= pagesToSkipOver;
 currentOffsetInBlob += pagesToSkipOver * PAGE_SIZE;
 skipped += pagesToSkipOver * PAGE_DATA_SIZE;





[1/3] hadoop git commit: HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when numberOfPagesRemaining is 0. Contributed by Rajesh Balamohan

2017-11-28 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3d73d2f09 -> 5c37a0b84
  refs/heads/branch-3.0 72d0be964 -> 8bf74f325
  refs/heads/trunk 94bed5047 -> 0ea182d0f


HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when 
numberOfPagesRemaining is 0.
Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ea182d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ea182d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ea182d0

Branch: refs/heads/trunk
Commit: 0ea182d0faa35c726dcb37249d48786bfc8ca04c
Parents: 94bed50
Author: Steve Loughran 
Authored: Tue Nov 28 11:52:59 2017 +
Committer: Steve Loughran 
Committed: Tue Nov 28 11:52:59 2017 +

--
 .../java/org/apache/hadoop/fs/azure/PageBlobInputStream.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ea182d0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 097201b..aaac490 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -343,9 +343,9 @@ final class PageBlobInputStream extends InputStream {
 
 // Skip over whole pages as necessary without retrieving them from the
 // server.
-long pagesToSkipOver = Math.min(
+long pagesToSkipOver = Math.max(0, Math.min(
 n / PAGE_DATA_SIZE,
-numberOfPagesRemaining - 1);
+numberOfPagesRemaining - 1));
 numberOfPagesRemaining -= pagesToSkipOver;
 currentOffsetInBlob += pagesToSkipOver * PAGE_SIZE;
 skipped += pagesToSkipOver * PAGE_DATA_SIZE;





[3/3] hadoop git commit: HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when numberOfPagesRemaining is 0. Contributed by Rajesh Balamohan

2017-11-28 Thread stevel
HADOOP-15042. Azure PageBlobInputStream.skip() can return negative value when 
numberOfPagesRemaining is 0.
Contributed by Rajesh Balamohan

(cherry picked from commit 0ea182d0faa35c726dcb37249d48786bfc8ca04c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c37a0b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c37a0b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c37a0b8

Branch: refs/heads/branch-2
Commit: 5c37a0b841e0047f4731ed5cac12ce27efa76e6d
Parents: 3d73d2f
Author: Steve Loughran 
Authored: Tue Nov 28 11:57:51 2017 +
Committer: Steve Loughran 
Committed: Tue Nov 28 11:57:51 2017 +

--
 .../java/org/apache/hadoop/fs/azure/PageBlobInputStream.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c37a0b8/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
index 097201b..aaac490 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java
@@ -343,9 +343,9 @@ final class PageBlobInputStream extends InputStream {
 
 // Skip over whole pages as necessary without retrieving them from the
 // server.
-long pagesToSkipOver = Math.min(
+long pagesToSkipOver = Math.max(0, Math.min(
 n / PAGE_DATA_SIZE,
-numberOfPagesRemaining - 1);
+numberOfPagesRemaining - 1));
 numberOfPagesRemaining -= pagesToSkipOver;
 currentOffsetInBlob += pagesToSkipOver * PAGE_SIZE;
 skipped += pagesToSkipOver * PAGE_DATA_SIZE;





hadoop git commit: YARN-7564. Cleanup to fix checkstyle issues of YARN-5881 branch. Contributed by Sunil G.

2017-11-28 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5881 85fc1b517 -> f7b1257d0


YARN-7564. Cleanup to fix checkstyle issues of YARN-5881 branch. Contributed by 
Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7b1257d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7b1257d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7b1257d

Branch: refs/heads/YARN-5881
Commit: f7b1257d0bea2fbc5d7f3221e5fadae8c172f1d7
Parents: 85fc1b5
Author: Sunil G 
Authored: Tue Nov 28 14:07:09 2017 +0530
Committer: Sunil G 
Committed: Tue Nov 28 14:07:09 2017 +0530

--
 .../org/apache/hadoop/util/StringUtils.java | 18 ++---
 .../impl/pb/QueueConfigurationsPBImpl.java  | 10 +++
 .../resource/DominantResourceCalculator.java|  2 +-
 .../yarn/util/resource/ResourceCalculator.java  | 12 -
 .../scheduler/AbstractResourceUsage.java|  8 +-
 .../scheduler/ResourceUsage.java|  6 -
 .../scheduler/capacity/CSQueue.java | 28 +---
 .../CapacitySchedulerConfiguration.java | 28 
 .../scheduler/capacity/TestReservations.java|  3 +--
 9 files changed, 70 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b1257d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 1be8a08..ebe7013 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -1171,16 +1171,16 @@ public class StringUtils {
* @return true if only contains letters, and is non-null
*/
   public static boolean isAlpha(String str) {
-  if (str == null) {
-  return false;
-  }
-  int sz = str.length();
-  for (int i = 0; i < sz; i++) {
-  if (Character.isLetter(str.charAt(i)) == false) {
-  return false;
-  }
+if (str == null) {
+  return false;
+}
+int sz = str.length();
+for (int i = 0; i < sz; i++) {
+  if (!Character.isLetter(str.charAt(i))) {
+return false;
   }
-  return true;
+}
+return true;
   }
 
 }
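
A tiny usage sketch for the cleaned-up helper, assuming hadoop-common on the classpath; behaviour follows the javadoc above:

import org.apache.hadoop.util.StringUtils;

class IsAlphaSketch {
  public static void main(String[] args) {
    System.out.println(StringUtils.isAlpha("yarn"));  // true
    System.out.println(StringUtils.isAlpha("yarn7")); // false: contains a digit
    System.out.println(StringUtils.isAlpha(null));    // false: null-safe
  }
}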

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b1257d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigurationsPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigurationsPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigurationsPBImpl.java
index 80ef4b2..8266013 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigurationsPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigurationsPBImpl.java
@@ -236,12 +236,12 @@ public class QueueConfigurationsPBImpl extends 
QueueConfigurations {
   }
 
   @Override
-  public void setConfiguredMinCapacity(Resource configuredMinResource) {
+  public void setConfiguredMinCapacity(Resource minResource) {
 maybeInitBuilder();
-if (configuredMinResource == null) {
+if (minResource == null) {
   builder.clearConfiguredMinCapacity();
 }
-this.configuredMinResource = configuredMinResource;
+this.configuredMinResource = minResource;
   }
 
   @Override
@@ -259,11 +259,11 @@ public class QueueConfigurationsPBImpl extends 
QueueConfigurations {
   }
 
   @Override
-  public void setConfiguredMaxCapacity(Resource configuredMaxResource) {
+  public void setConfiguredMaxCapacity(Resource maxResource) {
 maybeInitBuilder();
 if (configuredMaxResource == null) {
   builder.clearConfiguredMaxCapacity();
 }
-this.configuredMaxResource = configuredMaxResource;
+this.configuredMaxResource = maxResource;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b1257d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git