hadoop git commit: HDFS-11658. Ozone: SCM daemon is unable to be started via CLI. Contributed by Weiwei Yang.

2017-04-17 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 368424b46 -> d889437c2


HDFS-11658. Ozone: SCM daemon is unable to be started via CLI. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d889437c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d889437c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d889437c

Branch: refs/heads/HDFS-7240
Commit: d889437c27e258e26bde35aad132ab69e48a6bc3
Parents: 368424b
Author: Anu Engineer 
Authored: Mon Apr 17 19:09:11 2017 -0700
Committer: Anu Engineer 
Committed: Mon Apr 17 19:09:11 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d889437c/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index afb27c3..90b2ae3 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -201,7 +201,7 @@ function hdfscmd_case
 ;;
 scm)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-  HADOOP_CLASSNAME='org.apache.hadoop.ozone.storage.StorageContainerManager'
+  HADOOP_CLASSNAME='org.apache.hadoop.ozone.scm.StorageContainerManager'
   hadoop_debug "Appending HADOOP_SCM_OPTS onto HADOOP_OPTS"
   HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SCM_OPTS}"
 ;;





hadoop git commit: HDFS-11581. Ozone: Support force delete a container. Contributed by Yuanbo Liu.

2017-04-17 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 350220bfb -> 368424b46


HDFS-11581. Ozone: Support force delete a container. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/368424b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/368424b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/368424b4

Branch: refs/heads/HDFS-7240
Commit: 368424b46534de937a10c72fec424ea506a00700
Parents: 350220b
Author: Anu Engineer 
Authored: Mon Apr 17 17:58:54 2017 -0700
Committer: Anu Engineer 
Committed: Mon Apr 17 17:58:54 2017 -0700

--
 .../main/proto/DatanodeContainerProtocol.proto  |   2 +
 .../common/helpers/ContainerUtils.java  |   8 +-
 .../common/impl/ContainerManagerImpl.java   |   7 +-
 .../ozone/container/common/impl/Dispatcher.java |  14 +-
 .../common/interfaces/ContainerManager.java |   5 +-
 .../ozone/container/ContainerTestHelper.java|  15 ++
 .../common/impl/TestContainerPersistence.java   |   4 +-
 .../container/ozoneimpl/TestOzoneContainer.java | 267 +++
 8 files changed, 208 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/368424b4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
index 09b7602..2d018ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
@@ -126,6 +126,7 @@ enum Result {
   CLOSED_CONTAINER_IO = 22;
   ERROR_CONTAINER_NOT_EMPTY = 23;
   ERROR_IN_COMPACT_DB = 24;
+  UNCLOSED_CONTAINER_IO = 25;
 }
 
 message ContainerCommandRequestProto {
@@ -244,6 +245,7 @@ message  UpdateContainerResponseProto {
 message  DeleteContainerRequestProto {
   required Pipeline pipeline = 1;
   required string name = 2;
+  optional bool forceDelete = 3 [default = false];
 }
 
 message  DeleteContainerResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/368424b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index fa5de14..7d0e756 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -338,15 +338,19 @@ public final class ContainerUtils {
* we created on the data location.
*
* @param containerData - Data of the container to remove.
+   * @param conf - configuration of the cluster.
+   * @param forceDelete - whether this container should be deleted forcibly.
* @throws IOException
*/
   public static void removeContainer(ContainerData containerData,
-  Configuration conf) throws IOException {
+  Configuration conf, boolean forceDelete) throws IOException {
 Preconditions.checkNotNull(containerData);
 Path dbPath = Paths.get(containerData.getDBPath());
 
 LevelDBStore db = KeyUtils.getDB(containerData, conf);
-if(!db.isEmpty()) {
+// If the container is not empty and cannot be deleted forcibly,
+// then throw a SCE to stop deleting.
+if(!forceDelete && !db.isEmpty()) {
   throw new StorageContainerException(
   "Container cannot be deleted because it is not empty.",
   ContainerProtos.Result.ERROR_CONTAINER_NOT_EMPTY);
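
For illustration, a minimal client-side sketch of using the new optional
field; this is not code from this patch, and "pipeline" (an already-built
Pipeline message) and "containerName" are assumed to be in scope:

    // Hedged sketch: building a forced delete request. forceDelete defaults
    // to false when unset, so existing callers keep the old
    // "fail if the container is not empty" behavior.
    DeleteContainerRequestProto request =
        DeleteContainerRequestProto.newBuilder()
            .setPipeline(pipeline)      // required field
            .setName(containerName)     // required field
            .setForceDelete(true)       // bypasses the not-empty check
            .build();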

http://git-wip-us.apache.org/repos/asf/hadoop/blob/368424b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 545f11d..5beae37 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ 

hadoop git commit: HDFS-11634. Optimize BlockIterator when iterating starts in the middle. Contributed by Konstantin V Shvachko.

2017-04-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 07047335a -> 8b3b3aac1


HDFS-11634. Optimize BlockIterator when iterating starts in the middle. Contributed by Konstantin V Shvachko.

(cherry picked from commit 8dfcd95d580bb090af7f40af0a57061518c18c8c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b3b3aac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b3b3aac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b3b3aac

Branch: refs/heads/branch-2.8
Commit: 8b3b3aac1016d3134573c2467d890a99c94223c5
Parents: 0704733
Author: Konstantin V Shvachko 
Authored: Mon Apr 17 15:04:06 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Apr 17 17:09:38 2017 -0700

--
 .../server/blockmanagement/BlockManager.java|  6 +-
 .../blockmanagement/DatanodeDescriptor.java | 32 +++-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   | 77 ++--
 .../blockmanagement/BlockManagerTestUtil.java   | 16 
 4 files changed, 115 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b3b3aac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 858a54f..e792caf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1184,13 +1184,9 @@ public class BlockManager implements BlockStatsMXBean {
 if(numBlocks == 0) {
   return new BlocksWithLocations(new BlockWithLocations[0]);
 }
-Iterator<BlockInfo> iter = node.getBlockIterator();
 // starting from a random block
 int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
-// skip blocks
-for(int i=0; i<startBlock; i++) {
-  iter.next();
-}
+Iterator<BlockInfo> iter = node.getBlockIterator(startBlock);

[... diff continues in DatanodeDescriptor.java ...]

 private final List<Iterator<BlockInfo>> iterators;
 
-private BlockIterator(final DatanodeStorageInfo... storages) {
+private BlockIterator(final int startBlock,
+  final DatanodeStorageInfo... storages) {
+  if(startBlock < 0) {
+throw new IllegalArgumentException(
+"Illegal value startBlock = " + startBlock);
+  }
   List<Iterator<BlockInfo>> iterators = new ArrayList<>();
+  int s = startBlock;
+  int sumBlocks = 0;
   for (DatanodeStorageInfo e : storages) {
-iterators.add(e.getBlockIterator());
+int numBlocks = e.numBlocks();
+sumBlocks += numBlocks;
+if(sumBlocks <= startBlock) {
+  s -= numBlocks;
+} else {
+  iterators.add(e.getBlockIterator());
+}
   }
   this.iterators = Collections.unmodifiableList(iterators);
+  // skip to the storage containing startBlock
+  for(; s > 0 && hasNext(); s--) {
+next();
+  }
 }
 
 @Override
 public boolean hasNext() {
   update();
-  return !iterators.isEmpty() && iterators.get(index).hasNext();
+  return index < iterators.size() && iterators.get(index).hasNext();
 }
 
 @Override
@@ -532,7 +549,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
  Iterator<BlockInfo> getBlockIterator() {
-return new BlockIterator(getStorageInfos());
+return getBlockIterator(0);
+  }
+
+  /**
+   * Get iterator, which starts iterating from the specified block.
+   */
+  Iterator<BlockInfo> getBlockIterator(final int startBlock) {

hadoop git commit: HDFS-11634. Optimize BlockIterator when iterating starts in the middle. Contributed by Konstantin V Shvachko.

2017-04-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 051ab7780 -> c3840bde5


HDFS-11634. Optimize BlockIterator when iterating starts in the middle. Contributed by Konstantin V Shvachko.

(cherry picked from commit 8dfcd95d580bb090af7f40af0a57061518c18c8c)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3840bde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3840bde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3840bde

Branch: refs/heads/branch-2
Commit: c3840bde54dccdff8018338a2ea4245e07d64b6c
Parents: 051ab77
Author: Konstantin V Shvachko 
Authored: Mon Apr 17 15:04:06 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Apr 17 17:06:18 2017 -0700

--
 .../server/blockmanagement/BlockManager.java|  6 +-
 .../blockmanagement/DatanodeDescriptor.java | 32 +++-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   | 77 ++--
 .../blockmanagement/BlockManagerTestUtil.java   | 16 
 4 files changed, 115 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3840bde/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5d5706d..0e5cfc9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1235,13 +1235,9 @@ public class BlockManager implements BlockStatsMXBean {
 if(numBlocks == 0) {
   return new BlocksWithLocations(new BlockWithLocations[0]);
 }
-Iterator<BlockInfo> iter = node.getBlockIterator();
 // starting from a random block
 int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
-// skip blocks
-for(int i=0; i<startBlock; i++) {
-  iter.next();
-}
+Iterator<BlockInfo> iter = node.getBlockIterator(startBlock);

[... diff continues in DatanodeDescriptor.java ...]

 private final List<Iterator<BlockInfo>> iterators;
 
-private BlockIterator(final DatanodeStorageInfo... storages) {
+private BlockIterator(final int startBlock,
+  final DatanodeStorageInfo... storages) {
+  if(startBlock < 0) {
+throw new IllegalArgumentException(
+"Illegal value startBlock = " + startBlock);
+  }
   List<Iterator<BlockInfo>> iterators = new ArrayList<>();
+  int s = startBlock;
+  int sumBlocks = 0;
   for (DatanodeStorageInfo e : storages) {
-iterators.add(e.getBlockIterator());
+int numBlocks = e.numBlocks();
+sumBlocks += numBlocks;
+if(sumBlocks <= startBlock) {
+  s -= numBlocks;
+} else {
+  iterators.add(e.getBlockIterator());
+}
   }
   this.iterators = Collections.unmodifiableList(iterators);
+  // skip to the storage containing startBlock
+  for(; s > 0 && hasNext(); s--) {
+next();
+  }
 }
 
 @Override
 public boolean hasNext() {
   update();
-  return !iterators.isEmpty() && iterators.get(index).hasNext();
+  return index < iterators.size() && iterators.get(index).hasNext();
 }
 
 @Override
@@ -536,7 +553,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
  Iterator<BlockInfo> getBlockIterator() {
-return new BlockIterator(getStorageInfos());
+return getBlockIterator(0);
+  }
+
+  /**
+   * Get iterator, which starts iterating from the specified block.
+   */
+  Iterator<BlockInfo> getBlockIterator(final int startBlock) {

hadoop git commit: HDFS-11634. Optimize BlockIterator when iterating starts in the middle. Contributed by Konstantin V Shvachko.

2017-04-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk c0ca785db -> 8dfcd95d5


HDFS-11634. Optimize BlockIterator when iterating starts in the middle. Contributed by Konstantin V Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dfcd95d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dfcd95d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dfcd95d

Branch: refs/heads/trunk
Commit: 8dfcd95d580bb090af7f40af0a57061518c18c8c
Parents: c0ca785
Author: Konstantin V Shvachko 
Authored: Mon Apr 17 15:04:06 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Apr 17 16:56:24 2017 -0700

--
 .../server/blockmanagement/BlockManager.java|  6 +-
 .../blockmanagement/DatanodeDescriptor.java | 32 +++-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   | 77 ++--
 .../blockmanagement/BlockManagerTestUtil.java   | 16 
 4 files changed, 115 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dfcd95d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index be30e78..285acde 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1372,13 +1372,9 @@ public class BlockManager implements BlockStatsMXBean {
 if(numBlocks == 0) {
   return new BlocksWithLocations(new BlockWithLocations[0]);
 }
-Iterator<BlockInfo> iter = node.getBlockIterator();
 // starting from a random block
 int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
-// skip blocks
-for(int i=0; i<startBlock; i++) {
-  iter.next();
-}
+Iterator<BlockInfo> iter = node.getBlockIterator(startBlock);

[... diff continues in DatanodeDescriptor.java ...]

 private final List<Iterator<BlockInfo>> iterators;
 
-private BlockIterator(final DatanodeStorageInfo... storages) {
+private BlockIterator(final int startBlock,
+  final DatanodeStorageInfo... storages) {
+  if(startBlock < 0) {
+throw new IllegalArgumentException(
+"Illegal value startBlock = " + startBlock);
+  }
   List<Iterator<BlockInfo>> iterators = new ArrayList<>();
+  int s = startBlock;
+  int sumBlocks = 0;
   for (DatanodeStorageInfo e : storages) {
-iterators.add(e.getBlockIterator());
+int numBlocks = e.numBlocks();
+sumBlocks += numBlocks;
+if(sumBlocks <= startBlock) {
+  s -= numBlocks;
+} else {
+  iterators.add(e.getBlockIterator());
+}
   }
   this.iterators = Collections.unmodifiableList(iterators);
+  // skip to the storage containing startBlock
+  for(; s > 0 && hasNext(); s--) {
+next();
+  }
 }
 
 @Override
 public boolean hasNext() {
   update();
-  return !iterators.isEmpty() && iterators.get(index).hasNext();
+  return index < iterators.size() && iterators.get(index).hasNext();
 }
 
 @Override
@@ -552,7 +569,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
  Iterator<BlockInfo> getBlockIterator() {
-return new BlockIterator(getStorageInfos());
+return getBlockIterator(0);
+  }
+
+  /**
+   * Get iterator, which starts iterating from the specified block.
+   */
+  Iterator<BlockInfo> getBlockIterator(final int startBlock) {
+return new BlockIterator(startBlock, getStorageInfos());
   }
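
For illustration, a self-contained sketch of the skip-ahead technique this
patch uses: skip whole storages by their known block counts in O(1) each, then
advance element by element only inside the first retained storage. This is not
the HDFS class; List<T> stands in for DatanodeStorageInfo:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public final class SkipAheadDemo {

      // Position an iterator over the concatenation of several "storages"
      // at a global start index without stepping through every element
      // before it.
      static <T> Iterator<T> iteratorFrom(List<List<T>> storages,
          int startBlock) {
        if (startBlock < 0) {
          throw new IllegalArgumentException(
              "Illegal value startBlock = " + startBlock);
        }
        List<T> kept = new ArrayList<>();
        int s = startBlock;
        int sumBlocks = 0;
        for (List<T> storage : storages) {
          int numBlocks = storage.size();
          sumBlocks += numBlocks;
          if (sumBlocks <= startBlock) {
            s -= numBlocks;        // entire storage precedes the start point
          } else {
            kept.addAll(storage);  // stand-in for keeping its iterator
          }
        }
        Iterator<T> it = kept.iterator();
        for (; s > 0 && it.hasNext(); s--) {
          it.next();               // advance only within the first kept storage
        }
        return it;
      }

      public static void main(String[] args) {
        List<List<Integer>> storages = Arrays.asList(
            Arrays.asList(0, 1, 2), Arrays.asList(3, 4), Arrays.asList(5, 6, 7));
        for (Iterator<Integer> it = iteratorFrom(storages, 4); it.hasNext();) {
          System.out.print(it.next() + " ");   // prints: 4 5 6 7
        }
      }
    }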
 

hadoop git commit: HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond precision. Contributed by Erik Krogen.

2017-04-17 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 fac5bdc02 -> ccd24f091


HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond 
precision. Contributed by Erik Krogen.

(cherry picked from commit ad49098eb324e238d97db68d7239ed2c4d84afa0)
(cherry picked from commit 051ab778065d3659c5284b5d95d04156c4ee7ffa)
(cherry picked from commit 07047335a3bc12b719ca608742bef1e565151035)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccd24f09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccd24f09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccd24f09

Branch: refs/heads/branch-2.7
Commit: ccd24f0917ec660dd2ba87c9aa77a4e42c2f49cc
Parents: fac5bdc
Author: Zhe Zhang 
Authored: Mon Apr 17 16:22:20 2017 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 17 16:55:24 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   5 +-
 .../hdfs/server/namenode/FSNamesystemLock.java  | 129 +++
 .../src/main/resources/hdfs-default.xml |   6 +-
 .../server/namenode/TestFSNamesystemLock.java   |  16 +--
 4 files changed, 90 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccd24f09/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 47a6e47..286b3a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -225,7 +225,7 @@ Release 2.7.4 - UNRELEASED
 (Eric Krogen via aajisaka)
 
 HDFS-11352. Potential deadlock in NN when failing over.
-(Eric Krogen via aajisaka)
+(Erik Krogen via aajisaka)
 
 HDFS-11379. DFSInputStream may infinite loop requesting block locations. 
Contributed by Daryn Sharp.
 
@@ -235,6 +235,9 @@ Release 2.7.4 - UNRELEASED
 HDFS-11486. Client close() should not fail fast if the last block is being
 decommissioned. Contributed by Wei-Chiu Chuang and Yiqun Lin.
 
+HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond precision.
+(Erik Krogen via zhz)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccd24f09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index c7dde2b..be33c4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -1,4 +1,3 @@
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -43,11 +42,15 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPOR
 /**
  * Mimics a ReentrantReadWriteLock but does not directly implement the 
interface
  * so more sophisticated locking capabilities and logging/metrics are possible.
- * If {@link 
org.apache.hadoop.hdfs.DFSConfigKeys#DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY}
- * is set to be true, metrics will be emitted into the FSNamesystem metrics
- * registry for each operation which acquires this lock indicating how long
- * the operation held the lock for. Note that if a thread dies, metrics 
produced
- * after the most recent snapshot will be lost due to the use of
+ * {@link 
org.apache.hadoop.hdfs.DFSConfigKeys#DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY}
+ * to be true, metrics will be emitted into the FSNamesystem metrics registry
+ * for each operation which acquires this lock indicating how long the 
operation
+ * held the lock for. These metrics have names of the form
+ * FSN(Read|Write)LockNanosOperationName, where OperationName denotes the name
+ * of the operation that initiated the lock hold (this will be OTHER for 
certain
+ * uncategorized operations) and they export the hold time values in
+ * nanoseconds. Note that if a thread dies, metrics produced after the
+ * most recent snapshot will be lost due to the use of
  * {@link MutableRatesWithAggregation}. However since threads are re-used
  * between operations this should not generally be an issue.
  */
@@ -63,24 +66,26 @@ class FSNamesystemLock {
* Log statements about long lock hold times will not be produced more
* frequently than this interval.
*/
-  

hadoop git commit: HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond precision. Contributed by Erik Krogen.

2017-04-17 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 0de6f2802 -> 07047335a


HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond 
precision. Contributed by Erik Krogen.

(cherry picked from commit ad49098eb324e238d97db68d7239ed2c4d84afa0)
(cherry picked from commit 051ab778065d3659c5284b5d95d04156c4ee7ffa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07047335
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07047335
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07047335

Branch: refs/heads/branch-2.8
Commit: 07047335a3bc12b719ca608742bef1e565151035
Parents: 0de6f28
Author: Zhe Zhang 
Authored: Mon Apr 17 16:22:20 2017 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 17 16:36:21 2017 -0700

--
 .../hdfs/server/namenode/FSNamesystemLock.java  | 129 +++
 .../src/main/resources/hdfs-default.xml |   6 +-
 .../server/namenode/TestFSNamesystemLock.java   |  16 +--
 3 files changed, 86 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07047335/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index c7dde2b..be33c4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -1,4 +1,3 @@
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -43,11 +42,15 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPOR
 /**
  * Mimics a ReentrantReadWriteLock but does not directly implement the 
interface
  * so more sophisticated locking capabilities and logging/metrics are possible.
- * If {@link 
org.apache.hadoop.hdfs.DFSConfigKeys#DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY}
- * is set to be true, metrics will be emitted into the FSNamesystem metrics
- * registry for each operation which acquires this lock indicating how long
- * the operation held the lock for. Note that if a thread dies, metrics 
produced
- * after the most recent snapshot will be lost due to the use of
+ * {@link 
org.apache.hadoop.hdfs.DFSConfigKeys#DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY}
+ * to be true, metrics will be emitted into the FSNamesystem metrics registry
+ * for each operation which acquires this lock indicating how long the 
operation
+ * held the lock for. These metrics have names of the form
+ * FSN(Read|Write)LockNanosOperationName, where OperationName denotes the name
+ * of the operation that initiated the lock hold (this will be OTHER for 
certain
+ * uncategorized operations) and they export the hold time values in
+ * nanoseconds. Note that if a thread dies, metrics produced after the
+ * most recent snapshot will be lost due to the use of
  * {@link MutableRatesWithAggregation}. However since threads are re-used
  * between operations this should not generally be an issue.
  */
@@ -63,24 +66,26 @@ class FSNamesystemLock {
* Log statements about long lock hold times will not be produced more
* frequently than this interval.
*/
-  private final long lockSuppressWarningInterval;
+  private final long lockSuppressWarningIntervalMs;
 
   /** Threshold (ms) for long holding write lock report. */
-  private final long writeLockReportingThreshold;
+  private final long writeLockReportingThresholdMs;
   /** Last time stamp for write lock. Keep the longest one for 
multi-entrance.*/
-  private long writeLockHeldTimeStamp;
+  private long writeLockHeldTimeStampNanos;
   private int numWriteLockWarningsSuppressed = 0;
-  private long timeStampOfLastWriteLockReport = 0;
-  private long longestWriteLockHeldInterval = 0;
+  /** Time stamp (ms) of the last time a write lock report was written. */
+  private long timeStampOfLastWriteLockReportMs = 0;
+  /** Longest time (ms) a write lock was held since the last report. */
+  private long longestWriteLockHeldIntervalMs = 0;
 
   /** Threshold (ms) for long holding read lock report. */
-  private final long readLockReportingThreshold;
+  private final long readLockReportingThresholdMs;
   /**
* Last time stamp for read lock. Keep the longest one for
* multi-entrance. This is ThreadLocal since there could be
* many read locks held simultaneously.
*/
-  private final ThreadLocal<Long> readLockHeldTimeStamp =

hadoop git commit: HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond precision. Contributed by Erik Krogen.

2017-04-17 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9289f4527 -> 051ab7780


HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond 
precision. Contributed by Erik Krogen.

(cherry picked from commit ad49098eb324e238d97db68d7239ed2c4d84afa0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/051ab778
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/051ab778
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/051ab778

Branch: refs/heads/branch-2
Commit: 051ab778065d3659c5284b5d95d04156c4ee7ffa
Parents: 9289f45
Author: Zhe Zhang 
Authored: Mon Apr 17 16:22:20 2017 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 17 16:28:58 2017 -0700

--
 .../hdfs/server/namenode/FSNamesystemLock.java  | 129 +++
 .../src/main/resources/hdfs-default.xml |   6 +-
 .../server/namenode/TestFSNamesystemLock.java   |  16 +--
 3 files changed, 86 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/051ab778/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index c7dde2b..be33c4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -1,4 +1,3 @@
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -43,11 +42,15 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPOR
 /**
  * Mimics a ReentrantReadWriteLock but does not directly implement the 
interface
  * so more sophisticated locking capabilities and logging/metrics are possible.
- * If {@link 
org.apache.hadoop.hdfs.DFSConfigKeys#DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY}
- * is set to be true, metrics will be emitted into the FSNamesystem metrics
- * registry for each operation which acquires this lock indicating how long
- * the operation held the lock for. Note that if a thread dies, metrics 
produced
- * after the most recent snapshot will be lost due to the use of
+ * {@link 
org.apache.hadoop.hdfs.DFSConfigKeys#DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY}
+ * to be true, metrics will be emitted into the FSNamesystem metrics registry
+ * for each operation which acquires this lock indicating how long the 
operation
+ * held the lock for. These metrics have names of the form
+ * FSN(Read|Write)LockNanosOperationName, where OperationName denotes the name
+ * of the operation that initiated the lock hold (this will be OTHER for 
certain
+ * uncategorized operations) and they export the hold time values in
+ * nanoseconds. Note that if a thread dies, metrics produced after the
+ * most recent snapshot will be lost due to the use of
  * {@link MutableRatesWithAggregation}. However since threads are re-used
  * between operations this should not generally be an issue.
  */
@@ -63,24 +66,26 @@ class FSNamesystemLock {
* Log statements about long lock hold times will not be produced more
* frequently than this interval.
*/
-  private final long lockSuppressWarningInterval;
+  private final long lockSuppressWarningIntervalMs;
 
   /** Threshold (ms) for long holding write lock report. */
-  private final long writeLockReportingThreshold;
+  private final long writeLockReportingThresholdMs;
   /** Last time stamp for write lock. Keep the longest one for 
multi-entrance.*/
-  private long writeLockHeldTimeStamp;
+  private long writeLockHeldTimeStampNanos;
   private int numWriteLockWarningsSuppressed = 0;
-  private long timeStampOfLastWriteLockReport = 0;
-  private long longestWriteLockHeldInterval = 0;
+  /** Time stamp (ms) of the last time a write lock report was written. */
+  private long timeStampOfLastWriteLockReportMs = 0;
+  /** Longest time (ms) a write lock was held since the last report. */
+  private long longestWriteLockHeldIntervalMs = 0;
 
   /** Threshold (ms) for long holding read lock report. */
-  private final long readLockReportingThreshold;
+  private final long readLockReportingThresholdMs;
   /**
* Last time stamp for read lock. Keep the longest one for
* multi-entrance. This is ThreadLocal since there could be
* many read locks held simultaneously.
*/
-  private final ThreadLocal<Long> readLockHeldTimeStamp =
+  private final ThreadLocal<Long> readLockHeldTimeStampNanos =

hadoop git commit: HDFS-11652. Improve ECSchema and ErasureCodingPolicy toString, hashCode, equals.

2017-04-17 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk ad49098eb -> c0ca785db


HDFS-11652. Improve ECSchema and ErasureCodingPolicy toString, hashCode, equals.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0ca785d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0ca785d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0ca785d

Branch: refs/heads/trunk
Commit: c0ca785dbb516335ea7e170abb57550251a5d8f6
Parents: ad49098
Author: Andrew Wang 
Authored: Mon Apr 17 16:30:25 2017 -0700
Committer: Andrew Wang 
Committed: Mon Apr 17 16:30:25 2017 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 39 +
 .../hadoop/io/erasurecode/TestECSchema.java | 47 ++-
 .../hdfs/protocol/ErasureCodingPolicy.java  | 39 ++---
 .../hdfs/protocol/TestErasureCodingPolicy.java  | 86 
 4 files changed, 177 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ca785d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 1be653b..1f11757 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -21,6 +21,8 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -199,34 +201,31 @@ public final class ECSchema {
 
   @Override
   public boolean equals(Object o) {
-if (this == o) {
-  return true;
-}
-if (o == null || getClass() != o.getClass()) {
-  return false;
-}
-
-ECSchema ecSchema = (ECSchema) o;
-
-if (numDataUnits != ecSchema.numDataUnits) {
+if (o == null) {
   return false;
 }
-if (numParityUnits != ecSchema.numParityUnits) {
-  return false;
+if (o == this) {
+  return true;
 }
-if (!codecName.equals(ecSchema.codecName)) {
+if (o.getClass() != getClass()) {
   return false;
 }
-return extraOptions.equals(ecSchema.extraOptions);
+ECSchema rhs = (ECSchema) o;
+return new EqualsBuilder()
+.append(codecName, rhs.codecName)
+.append(extraOptions, rhs.extraOptions)
+.append(numDataUnits, rhs.numDataUnits)
+.append(numParityUnits, rhs.numParityUnits)
+.isEquals();
   }
 
   @Override
   public int hashCode() {
-int result = codecName.hashCode();
-result = 31 * result + extraOptions.hashCode();
-result = 31 * result + numDataUnits;
-result = 31 * result + numParityUnits;
-
-return result;
+return new HashCodeBuilder(1273158869, 1555022101)
+.append(codecName)
+.append(extraOptions)
+.append(numDataUnits)
+.append(numParityUnits)
+.toHashCode();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ca785d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
index 5726246..ae03835 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
@@ -22,13 +22,16 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
 import java.util.HashMap;
 import java.util.Map;
+import java.util.TreeMap;
 
 public class TestECSchema {
 
-   @Rule
-   public Timeout globalTimeout = new Timeout(30);
+  @Rule
+  public Timeout globalTimeout = new Timeout(30);
 
   @Test
   public void testGoodSchema() {
@@ -51,5 +54,45 @@ public class TestECSchema {
 assertEquals(numParityUnits, schema.getNumParityUnits());
 assertEquals(codec, schema.getCodecName());
 assertEquals(extraOptionValue, schema.getExtraOptions().get(extraOption));
+
+Map 
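
For reference, a minimal self-contained sketch of the commons-lang builder
pattern this change adopts for equals/hashCode; the Point class and its fields
are invented for illustration, only the builder usage mirrors the patch:

    import org.apache.commons.lang.builder.EqualsBuilder;
    import org.apache.commons.lang.builder.HashCodeBuilder;

    public final class Point {
      private final int x;
      private final int y;

      public Point(int x, int y) { this.x = x; this.y = y; }

      @Override
      public boolean equals(Object o) {
        if (o == null) { return false; }
        if (o == this) { return true; }
        if (o.getClass() != getClass()) { return false; }
        Point rhs = (Point) o;
        // Append each significant field; isEquals() folds the comparisons.
        return new EqualsBuilder()
            .append(x, rhs.x)
            .append(y, rhs.y)
            .isEquals();
      }

      @Override
      public int hashCode() {
        // Seeds are arbitrary non-zero odd numbers, as in the ECSchema patch.
        return new HashCodeBuilder(17, 37).append(x).append(y).toHashCode();
      }
    }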

hadoop git commit: HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond precision. Contributed by Erik Krogen.

2017-04-17 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6ed9d3622 -> ad49098eb


HDFS-11615. FSNamesystemLock metrics can be inaccurate due to millisecond 
precision. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad49098e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad49098e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad49098e

Branch: refs/heads/trunk
Commit: ad49098eb324e238d97db68d7239ed2c4d84afa0
Parents: 6ed9d36
Author: Zhe Zhang 
Authored: Mon Apr 17 16:22:20 2017 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 17 16:22:33 2017 -0700

--
 .../hdfs/server/namenode/FSNamesystemLock.java  | 121 +++
 .../src/main/resources/hdfs-default.xml |   6 +-
 .../server/namenode/TestFSNamesystemLock.java   |  16 +--
 3 files changed, 82 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad49098e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index d90e1d0..be33c4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -1,4 +1,3 @@
-
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -46,7 +45,11 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPOR
  * {@link 
org.apache.hadoop.hdfs.DFSConfigKeys#DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY}
  * to be true, metrics will be emitted into the FSNamesystem metrics registry
  * for each operation which acquires this lock indicating how long the 
operation
- * held the lock for. Note that if a thread dies, metrics produced after the
+ * held the lock for. These metrics have names of the form
+ * FSN(Read|Write)LockNanosOperationName, where OperationName denotes the name
+ * of the operation that initiated the lock hold (this will be OTHER for 
certain
+ * uncategorized operations) and they export the hold time values in
+ * nanoseconds. Note that if a thread dies, metrics produced after the
  * most recent snapshot will be lost due to the use of
  * {@link MutableRatesWithAggregation}. However since threads are re-used
  * between operations this should not generally be an issue.
@@ -63,24 +66,26 @@ class FSNamesystemLock {
* Log statements about long lock hold times will not be produced more
* frequently than this interval.
*/
-  private final long lockSuppressWarningInterval;
+  private final long lockSuppressWarningIntervalMs;
 
   /** Threshold (ms) for long holding write lock report. */
-  private final long writeLockReportingThreshold;
+  private final long writeLockReportingThresholdMs;
   /** Last time stamp for write lock. Keep the longest one for 
multi-entrance.*/
-  private long writeLockHeldTimeStamp;
+  private long writeLockHeldTimeStampNanos;
   private int numWriteLockWarningsSuppressed = 0;
-  private long timeStampOfLastWriteLockReport = 0;
-  private long longestWriteLockHeldInterval = 0;
+  /** Time stamp (ms) of the last time a write lock report was written. */
+  private long timeStampOfLastWriteLockReportMs = 0;
+  /** Longest time (ms) a write lock was held since the last report. */
+  private long longestWriteLockHeldIntervalMs = 0;
 
   /** Threshold (ms) for long holding read lock report. */
-  private final long readLockReportingThreshold;
+  private final long readLockReportingThresholdMs;
   /**
* Last time stamp for read lock. Keep the longest one for
* multi-entrance. This is ThreadLocal since there could be
* many read locks held simultaneously.
*/
-  private final ThreadLocal<Long> readLockHeldTimeStamp =
+  private final ThreadLocal<Long> readLockHeldTimeStampNanos =
   new ThreadLocal<Long>() {
 @Override
 public Long initialValue() {
@@ -89,13 +94,16 @@ class FSNamesystemLock {
   };
   private final AtomicInteger numReadLockWarningsSuppressed =
   new AtomicInteger(0);
-  private final AtomicLong timeStampOfLastReadLockReport = new AtomicLong(0);
-  private final AtomicLong longestReadLockHeldInterval = new AtomicLong(0);
+  /** Time stamp (ms) of the last time a read lock report was written. */
+  private final AtomicLong timeStampOfLastReadLockReportMs = new AtomicLong(0);
+  /** Longest time 
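
For illustration, a self-contained example of the precision pitfall this patch
addresses, independent of the FSNamesystemLock code: a millisecond clock
rounds sub-millisecond lock holds down to zero, so sampling in nanoseconds and
converting only at report time preserves the signal:

    import java.util.concurrent.TimeUnit;

    public final class NanoTimingDemo {
      public static void main(String[] args) throws InterruptedException {
        long startNanos = System.nanoTime();
        TimeUnit.MICROSECONDS.sleep(300);            // a sub-millisecond "hold"
        long heldNanos = System.nanoTime() - startNanos;

        long msView = TimeUnit.NANOSECONDS.toMillis(heldNanos); // typically 0
        double nsView = heldNanos / 1_000_000.0;                // e.g. ~0.3 ms

        System.out.println("ms-precision view: " + msView + " ms");
        System.out.printf("ns-precision view: %.3f ms%n", nsView);
      }
    }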

hadoop git commit: HDFS-11631. Block Storage : allow cblock server to be started from hdfs commandline. Contributed by Chen Liang.

2017-04-17 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 49c92ea73 -> 350220bfb


HDFS-11631. Block Storage : allow cblock server to be started from hdfs 
commandline. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/350220bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/350220bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/350220bf

Branch: refs/heads/HDFS-7240
Commit: 350220bfb3c555ddf8ab4f0f5a444e37a6d3d9db
Parents: 49c92ea
Author: Anu Engineer 
Authored: Mon Apr 17 15:54:39 2017 -0700
Committer: Anu Engineer 
Committed: Mon Apr 17 15:54:39 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   | 11 +
 .../apache/hadoop/cblock/CBlockConfigKeys.java  | 15 ++-
 .../org/apache/hadoop/cblock/CBlockManager.java | 47 
 3 files changed, 71 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/350220bf/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 1ae5de4..afb27c3 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -33,6 +33,8 @@ function hadoop_usage
 
   hadoop_add_subcommand "balancer" "run a cluster balancing utility"
   hadoop_add_subcommand "cacheadmin" "configure the HDFS cache"
+  hadoop_add_subcommand "cblock" "cblock CLI"
+  hadoop_add_subcommand "cblockserver" "run cblock server"
   hadoop_add_subcommand "classpath" "prints the class path needed to get the 
hadoop jar and the required libraries"
   hadoop_add_subcommand "crypto" "configure HDFS encryption zones"
   hadoop_add_subcommand "datanode" "run a DFS datanode"
@@ -86,6 +88,15 @@ function hdfscmd_case
 cacheadmin)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
 ;;
+cblock)
+  HADOOP_CLASSNAME=org.apache.hadoop.cblock.cli.CBlockCli
+;;
+cblockserver)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  HADOOP_CLASSNAME=org.apache.hadoop.cblock.CBlockManager
+  hadoop_debug "Appending HADOOP_CBLOCK_OPTS onto HADOOP_OPTS"
+  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CBLOCK_OPTS}"
+;;
 classpath)
   hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/350220bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
index 8aa4ae8..b1fba41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
@@ -142,9 +142,20 @@ public final class CBlockConfigKeys {
   public static final String DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_DEFAULT =
   "127.0.0.1";
 
+  // to what address cblock server should talk to scm?
+  public static final String DFS_CBLOCK_SCM_IPADDRESS_KEY =
+  "dfs.cblock.scm.ipaddress";
+  public static final String DFS_CBLOCK_SCM_IPADDRESS_DEFAULT =
+  "127.0.0.1";
+  public static final String DFS_CBLOCK_SCM_PORT_KEY =
+  "dfs.cblock.scm.port";
+  public static final int DFS_CBLOCK_SCM_PORT_DEFAULT = 9860;
+
   public static final String DFS_CBLOCK_CONTAINER_SIZE_GB_KEY =
-  "dfs.cblock.container.size.gb";
-  public static final int DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT = 5;
+  "dfs.cblock.container.size";
+  public static final int DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT =
+  5;
+
 
   private CBlockConfigKeys() {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/350220bf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java
index c7abed6..9f8d5b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/cblock/CBlockManager.java
@@ -30,6 +30,11 @@ import 
org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocolPB;
 import 
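
For illustration, a hedged sketch of how a server might resolve the SCM
address keys added in this patch, falling back to the declared
127.0.0.1:9860 defaults when unset; this is not code from the patch:

    import org.apache.hadoop.cblock.CBlockConfigKeys;
    import org.apache.hadoop.conf.Configuration;

    public final class ScmAddressDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        String scmHost = conf.getTrimmed(
            CBlockConfigKeys.DFS_CBLOCK_SCM_IPADDRESS_KEY,
            CBlockConfigKeys.DFS_CBLOCK_SCM_IPADDRESS_DEFAULT);
        int scmPort = conf.getInt(
            CBlockConfigKeys.DFS_CBLOCK_SCM_PORT_KEY,
            CBlockConfigKeys.DFS_CBLOCK_SCM_PORT_DEFAULT);
        System.out.println("SCM endpoint: " + scmHost + ":" + scmPort);
      }
    }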

hadoop git commit: YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. Contributed by Rohith Sharma K S.

2017-04-17 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.1 311d92439 -> 562af708c


YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. 
Contributed by Rohith Sharma K S.

(cherry picked from commit 6ed9d362242d36bfae4083bf3b1c2eb32fbb2f72)
(cherry picked from commit 9289f4527d8fc3418fdeb8bf213b0f62525092d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/562af708
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/562af708
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/562af708

Branch: refs/heads/branch-2.8.1
Commit: 562af708ca596e8c106905ff3e0a5921991e808b
Parents: 311d924
Author: Junping Du 
Authored: Mon Apr 17 15:51:10 2017 -0700
Committer: Junping Du 
Committed: Mon Apr 17 15:59:32 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/AdminService.java   | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/562af708/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 5371a21..74be9cd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -288,6 +288,9 @@ public class AdminService extends CompositeService 
implements
   @Override
   public synchronized void transitionToActive(
   HAServiceProtocol.StateChangeRequestInfo reqInfo) throws IOException {
+if (isRMActive()) {
+  return;
+}
 // call refreshAdminAcls before HA state transition
 // for the case that adminAcls have been updated in previous active RM
 try {





hadoop git commit: YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. Contributed by Rohith Sharma K S.

2017-04-17 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5970e8297 -> 0de6f2802


YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. 
Contributed by Rohith Sharma K S.

(cherry picked from commit 6ed9d362242d36bfae4083bf3b1c2eb32fbb2f72)
(cherry picked from commit 9289f4527d8fc3418fdeb8bf213b0f62525092d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0de6f280
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0de6f280
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0de6f280

Branch: refs/heads/branch-2.8
Commit: 0de6f2802038bb44b368ebd92cd664a301c4af56
Parents: 5970e82
Author: Junping Du 
Authored: Mon Apr 17 15:51:10 2017 -0700
Committer: Junping Du 
Committed: Mon Apr 17 15:59:00 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/AdminService.java   | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de6f280/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 5371a21..74be9cd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -288,6 +288,9 @@ public class AdminService extends CompositeService 
implements
   @Override
   public synchronized void transitionToActive(
   HAServiceProtocol.StateChangeRequestInfo reqInfo) throws IOException {
+if (isRMActive()) {
+  return;
+}
 // call refreshAdminAcls before HA state transition
 // for the case that adminAcls have been updated in previous active RM
 try {





hadoop git commit: YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. Contributed by Rohith Sharma K S.

2017-04-17 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6f5912c10 -> 9289f4527


YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. 
Contributed by Rohith Sharma K S.

(cherry picked from commit 6ed9d362242d36bfae4083bf3b1c2eb32fbb2f72)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9289f452
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9289f452
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9289f452

Branch: refs/heads/branch-2
Commit: 9289f4527d8fc3418fdeb8bf213b0f62525092d5
Parents: 6f5912c
Author: Junping Du 
Authored: Mon Apr 17 15:51:10 2017 -0700
Committer: Junping Du 
Committed: Mon Apr 17 15:52:02 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/AdminService.java   | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9289f452/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index bcf7309..74c87a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -286,6 +286,9 @@ public class AdminService extends CompositeService 
implements
   @Override
   public synchronized void transitionToActive(
   HAServiceProtocol.StateChangeRequestInfo reqInfo) throws IOException {
+if (isRMActive()) {
+  return;
+}
 // call refreshAdminAcls before HA state transition
 // for the case that adminAcls have been updated in previous active RM
 try {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. Contributed by Rohith Sharma K S.

2017-04-17 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk f1de2c856 -> 6ed9d3622


YARN-6304. Skip rm.transitionToActive call to RM if RM is already active. 
Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ed9d362
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ed9d362
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ed9d362

Branch: refs/heads/trunk
Commit: 6ed9d362242d36bfae4083bf3b1c2eb32fbb2f72
Parents: f1de2c8
Author: Junping Du 
Authored: Mon Apr 17 15:51:10 2017 -0700
Committer: Junping Du 
Committed: Mon Apr 17 15:51:10 2017 -0700

--
 .../apache/hadoop/yarn/server/resourcemanager/AdminService.java   | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed9d362/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index bcf7309..74c87a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -286,6 +286,9 @@ public class AdminService extends CompositeService 
implements
   @Override
   public synchronized void transitionToActive(
   HAServiceProtocol.StateChangeRequestInfo reqInfo) throws IOException {
+if (isRMActive()) {
+  return;
+}
 // call refreshAdminAcls before HA state transition
 // for the case that adminAcls have been updated in previous active RM
 try {
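
For illustration, a generic, self-contained sketch of the idempotency guard
added above: return early when already in the target state so a repeated
failover request does not re-run pre-transition side effects. The class and
method names here are invented stand-ins, not the RM code:

    public final class HaStateDemo {
      enum State { STANDBY, ACTIVE }

      private State state = State.STANDBY;

      public synchronized void transitionToActive() {
        if (state == State.ACTIVE) {
          return;                // already active: nothing to do
        }
        refreshAdminAcls();      // hypothetical step that must not run twice
        state = State.ACTIVE;
      }

      private void refreshAdminAcls() {
        // placeholder for non-repeatable pre-transition work
      }
    }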





hadoop git commit: YARN-6432. FairScheduler: Reserve preempted resources for corresponding applications. (Miklos Szegedi via kasha)

2017-04-17 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6ade9e6ce -> 6f5912c10


YARN-6432. FairScheduler: Reserve preempted resources for corresponding 
applications. (Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f5912c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f5912c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f5912c1

Branch: refs/heads/branch-2
Commit: 6f5912c106781ef252162cb63fd889ba536b0702
Parents: 6ade9e6
Author: Karthik Kambatla 
Authored: Mon Apr 17 14:16:53 2017 -0700
Committer: Karthik Kambatla 
Committed: Mon Apr 17 14:34:28 2017 -0700

--
 .../rmcontainer/RMContainer.java|   3 +-
 .../rmcontainer/RMContainerImpl.java|   2 +-
 .../scheduler/SchedulerNode.java|   2 +-
 .../scheduler/fair/FSAppAttempt.java|   8 +-
 .../scheduler/fair/FSPreemptionThread.java  |  25 +-
 .../scheduler/fair/FSSchedulerNode.java | 133 +-
 .../scheduler/fair/FairScheduler.java   |  41 +-
 .../scheduler/fair/TestFSSchedulerNode.java | 403 +++
 .../fair/TestFairSchedulerPreemption.java   |  19 +
 9 files changed, 597 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f5912c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
index 7ad381e..29680e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
@@ -42,7 +42,8 @@ import 
org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
  * when resources are being reserved to fill space for a future container 
  * allocation.
  */
-public interface RMContainer extends EventHandler<RMContainerEvent> {
+public interface RMContainer extends EventHandler<RMContainerEvent>,
+Comparable<RMContainer> {
 
   ContainerId getContainerId();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f5912c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 9264e4e..bb7084e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -63,7 +63,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 @SuppressWarnings({"unchecked", "rawtypes"})
-public class RMContainerImpl implements RMContainer, Comparable<RMContainerImpl> {
+public class RMContainerImpl implements RMContainer {
 
   private static final Log LOG = LogFactory.getLog(RMContainerImpl.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f5912c1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
 

hadoop git commit: HDFS-11657. Ozone: Add unit test for storage container metrics. Contributed by Yiqun Lin.

2017-04-17 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 1a418ef2c -> 49c92ea73


HDFS-11657. Ozone: Add unit test for storage container metrics. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49c92ea7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49c92ea7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49c92ea7

Branch: refs/heads/HDFS-7240
Commit: 49c92ea73d9ac0ec45cf5ac682071cf8aa36bf36
Parents: 1a418ef
Author: Anu Engineer 
Authored: Mon Apr 17 12:44:29 2017 -0700
Committer: Anu Engineer 
Committed: Mon Apr 17 12:44:29 2017 -0700

--
 .../common/helpers/ContainerMetrics.java|  29 ++---
 .../container/metrics/TestContainerMetrics.java | 122 +++
 2 files changed, 137 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49c92ea7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 55c0fbc..23ba00b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -62,23 +62,24 @@ public class ContainerMetrics {
 this.opsLatQuantiles = new MutableQuantiles[numEnumEntries][len];
 this.registry = new MetricsRegistry("StorageContainerMetrics");
 for (int i = 0; i < numEnumEntries; i++) {
-  numOpsArray[i] =
-  registry.newCounter("num" + ContainerProtos.Type.valueOf(i),
-  "number of " + ContainerProtos.Type.valueOf(i) + " ops", 
(long)0);
-  opsBytesArray[i] =
-  registry.newCounter("bytes" + ContainerProtos.Type.valueOf(i),
-  "bytes used by " + ContainerProtos.Type.valueOf(i)+"op", 
(long)0);
-  opsLatency[i] =
-  registry.newRate("latency" + ContainerProtos.Type.valueOf(i),
- ContainerProtos.Type.valueOf(i) + " op");
+  numOpsArray[i] = registry.newCounter(
+  "num" + ContainerProtos.Type.valueOf(i + 1),
+  "number of " + ContainerProtos.Type.valueOf(i + 1) + " ops",
+  (long) 0);
+  opsBytesArray[i] = registry.newCounter(
+  "bytes" + ContainerProtos.Type.valueOf(i + 1),
+  "bytes used by " + ContainerProtos.Type.valueOf(i + 1) + "op",
+  (long) 0);
+  opsLatency[i] = registry.newRate(
+  "latency" + ContainerProtos.Type.valueOf(i + 1),
+  ContainerProtos.Type.valueOf(i + 1) + " op");
 
   for (int j = 0; j < len; j++) {
 int interval = intervals[j];
-String quantileName = ContainerProtos.Type.valueOf(i) + "Nanos" +
-  interval + "s";
-opsLatQuantiles[i][j] =
-  registry.newQuantiles(quantileName, "latency of Container ops",
-"ops", "latency", interval);
+String quantileName = ContainerProtos.Type.valueOf(i + 1) + "Nanos"
++ interval + "s";
+opsLatQuantiles[i][j] = registry.newQuantiles(quantileName,
+"latency of Container ops", "ops", "latency", interval);
   }
 }
   }
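The shift from Type.valueOf(i) to Type.valueOf(i + 1) matters because protobuf-generated enums are numbered from 1, while the metrics arrays here are indexed from 0; valueOf(0) returns null, so the old code produced null-named metrics and never reached the last enum entry. A standalone sketch of the alignment (MyType and fromNumber() are hand-rolled stand-ins for the generated ContainerProtos.Type API):

// Demonstrates the off-by-one between a 0-based loop index and a
// 1-based protobuf-style enum numbering.
enum MyType {
  CREATE(1), READ(2), DELETE(3);

  private final int number;
  MyType(int number) { this.number = number; }

  static MyType fromNumber(int n) { // analogous to Type.valueOf(int)
    for (MyType t : values()) {
      if (t.number == n) return t;
    }
    return null; // no enum entry is numbered 0
  }

  public static void main(String[] args) {
    for (int i = 0; i < values().length; i++) {
      // fromNumber(i) would print "null" at i == 0 and miss DELETE;
      // i + 1 lines the array index up with the enum numbering.
      System.out.println(i + " -> " + fromNumber(i + 1));
    }
  }
}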

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49c92ea7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
new file mode 100644
index 0000000..df99577
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable 

hadoop git commit: MAPREDUCE-6875. Rename mapred-site.xml.template to mapred-site.xml. (Yuanbo Liu via Haibo Chen)

2017-04-17 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk ac3cfdf3e -> f1de2c856


MAPREDUCE-6875. Rename mapred-site.xml.template to mapred-site.xml. (Yuanbo Liu 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1de2c85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1de2c85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1de2c85

Branch: refs/heads/trunk
Commit: f1de2c8560a4ce1a346727c2c265570c5b6b872e
Parents: ac3cfdf
Author: Haibo Chen 
Authored: Mon Apr 17 12:00:15 2017 -0700
Committer: Haibo Chen 
Committed: Mon Apr 17 12:25:30 2017 -0700

--
 hadoop-mapreduce-project/.gitignore |  1 -
 hadoop-mapreduce-project/conf/mapred-site.xml   | 21 
 .../conf/mapred-site.xml.template   | 21 
 3 files changed, 21 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1de2c85/hadoop-mapreduce-project/.gitignore
--
diff --git a/hadoop-mapreduce-project/.gitignore 
b/hadoop-mapreduce-project/.gitignore
index d230896..0a86cfe 100644
--- a/hadoop-mapreduce-project/.gitignore
+++ b/hadoop-mapreduce-project/.gitignore
@@ -29,7 +29,6 @@ conf/core-site.xml
 conf/hdfs-site.xml
 conf/hadoop-env.sh
 conf/hadoop-site.xml
-conf/mapred-site.xml
 conf/hadoop-policy.xml
 conf/capacity-scheduler.xml
 conf/fair-scheduler.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1de2c85/hadoop-mapreduce-project/conf/mapred-site.xml
--
diff --git a/hadoop-mapreduce-project/conf/mapred-site.xml 
b/hadoop-mapreduce-project/conf/mapred-site.xml
new file mode 100644
index 0000000..761c352
--- /dev/null
+++ b/hadoop-mapreduce-project/conf/mapred-site.xml
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1de2c85/hadoop-mapreduce-project/conf/mapred-site.xml.template
--
diff --git a/hadoop-mapreduce-project/conf/mapred-site.xml.template 
b/hadoop-mapreduce-project/conf/mapred-site.xml.template
deleted file mode 100644
index 761c352..0000000
--- a/hadoop-mapreduce-project/conf/mapred-site.xml.template
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-
-
-
-
-
-


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/45] hadoop git commit: HADOOP-14225. Remove xmlenc dependency

2017-04-17 Thread inigoiri
HADOOP-14225. Remove xmlenc dependency


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/613fc92c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/613fc92c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/613fc92c

Branch: refs/heads/HDFS-10467
Commit: 613fc92c47b69bea8c412a7556a9a308b19e2f2c
Parents: 3e2d9a4
Author: Chris Douglas 
Authored: Mon Apr 10 11:48:40 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:00 2017 -0700

--
 LICENSE.txt |  1 -
 .../hadoop-client-minicluster/pom.xml   |  4 --
 hadoop-common-project/hadoop-common/pom.xml |  5 --
 .../hadoop/fs/MD5MD5CRC32FileChecksum.java  | 62 
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml |  5 --
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  5 --
 .../hadoop/hdfs/server/namenode/DfsServlet.java | 21 ---
 hadoop-project/pom.xml  |  5 --
 8 files changed, 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/613fc92c/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index a7e43c4..10af7ce 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1711,7 +1711,6 @@ Hamcrest Core 1.3
 ASM Core 5.0.4
 ASM Commons 5.0.2
 ASM Tree 5.0.2
-xmlenc Library 0.52
 

 (3-clause BSD)
 Redistribution and use in source and binary forms, with or without

http://git-wip-us.apache.org/repos/asf/hadoop/blob/613fc92c/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index c58ac38..b6197b3 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -218,10 +218,6 @@
          <groupId>javax.servlet</groupId>
          <artifactId>javax.servlet-api</artifactId>
        </exclusion>
-        <exclusion>
-          <groupId>xmlenc</groupId>
-          <artifactId>xmlenc</artifactId>
-        </exclusion>
   
 
 

[45/45] hadoop git commit: YARN-6432. FairScheduler: Reserve preempted resources for corresponding applications. (Miklos Szegedi via kasha)

2017-04-17 Thread inigoiri
YARN-6432. FairScheduler: Reserve preempted resources for corresponding 
applications. (Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6a0d2d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6a0d2d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6a0d2d3

Branch: refs/heads/HDFS-10467
Commit: b6a0d2d3ffc27ea51c88e8194eb9b0d594263261
Parents: fd1ec75
Author: Karthik Kambatla 
Authored: Wed Apr 12 14:17:13 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../rmcontainer/RMContainer.java|   3 +-
 .../rmcontainer/RMContainerImpl.java|   2 +-
 .../scheduler/SchedulerNode.java|   2 +-
 .../scheduler/fair/FSAppAttempt.java|   8 +-
 .../scheduler/fair/FSPreemptionThread.java  |  25 +-
 .../scheduler/fair/FSSchedulerNode.java | 133 +-
 .../scheduler/fair/FairScheduler.java   |  41 +-
 .../scheduler/fair/TestFSSchedulerNode.java | 403 +++
 .../fair/TestFairSchedulerPreemption.java   |  19 +
 9 files changed, 597 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6a0d2d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
index 7ad381e..29680e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
@@ -42,7 +42,8 @@ import 
org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
  * when resources are being reserved to fill space for a future container 
  * allocation.
  */
-public interface RMContainer extends EventHandler<RMContainerEvent> {
+public interface RMContainer extends EventHandler<RMContainerEvent>,
+Comparable<RMContainer> {
 
   ContainerId getContainerId();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6a0d2d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 12fbbea..1e9463a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -63,7 +63,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 @SuppressWarnings({"unchecked", "rawtypes"})
-public class RMContainerImpl implements RMContainer, Comparable<RMContainerImpl> {
+public class RMContainerImpl implements RMContainer {
 
   private static final Log LOG = LogFactory.getLog(RMContainerImpl.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6a0d2d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index af4a001..272537c 100644
--- 

[21/45] hadoop git commit: HDFS-11630. TestThrottledAsyncCheckerTimeout fails intermittently in Jenkins builds. Contributed by Hanisha Koneru.

2017-04-17 Thread inigoiri
HDFS-11630. TestThrottledAsyncCheckerTimeout fails intermittently in Jenkins 
builds. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ac91db6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ac91db6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ac91db6

Branch: refs/heads/HDFS-10467
Commit: 0ac91db62186a381e80b0ced05fe52fee0491cc6
Parents: 99ae945
Author: Hanisha Koneru 
Authored: Tue Apr 11 21:36:24 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:01 2017 -0700

--
 .../TestDatasetVolumeCheckerTimeout.java|  2 +-
 .../TestThrottledAsyncCheckerTimeout.java   | 45 ++--
 2 files changed, 24 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac91db6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
index dc091ed..953de4f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
@@ -94,7 +94,7 @@ public class TestDatasetVolumeCheckerTimeout {
 return volume;
   }
 
-  @Test (timeout = 1000)
+  @Test (timeout = 300000)
   public void testDiskCheckTimeout() throws Exception {
 LOG.info("Executing {}", testName.getMethodName());
 final FsVolumeSpi volume = makeSlowVolume();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ac91db6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
index 52cab57..b0993b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
@@ -17,6 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anySet;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.timeout;
+import static org.mockito.Mockito.verify;
+
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
@@ -35,12 +43,7 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.anySet;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
+import org.junit.rules.Timeout;
 import org.slf4j.LoggerFactory;
 
 import java.util.Set;
@@ -58,10 +61,10 @@ public class TestThrottledAsyncCheckerTimeout {
 
   @Rule
   public TestName testName = new TestName();
+  @Rule
+  public Timeout testTimeout = new Timeout(300_000);
 
-  Configuration conf;
   private static final long DISK_CHECK_TIMEOUT = 10;
-  private static final long DISK_CHECK_TIME = 100;
   private ReentrantLock lock;
 
   private ExecutorService getExecutorService() {
@@ -73,7 +76,7 @@ public class TestThrottledAsyncCheckerTimeout {
 lock = new ReentrantLock();
   }
 
-  @Test(timeout = 1000)
+  @Test
   public void testDiskCheckTimeout() throws Exception {
 LOG.info("Executing {}", testName.getMethodName());
 
@@ -123,7 +126,7 @@ public class TestThrottledAsyncCheckerTimeout {
 assertTrue(throwable[0] instanceof TimeoutException);
   }
 
-  @Test (timeout = 2000)
+  @Test
   public void testDiskCheckTimeoutInvokesOneCallbackOnly() throws Exception {
 LOG.info("Executing {}", testName.getMethodName());
 
@@ -143,13 +146,12 @@ public 

[25/45] hadoop git commit: YARN-6463. Correct spelling mistake in FileSystemRMStateStore. Contributed by Yeliang Cang.

2017-04-17 Thread inigoiri
YARN-6463. Correct spelling mistake in FileSystemRMStateStore. Contributed by 
Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d33e515
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d33e515
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d33e515

Branch: refs/heads/HDFS-10467
Commit: 7d33e5154e6e2ea60c6188131e10f6421cb428e5
Parents: 28dde86
Author: Naganarasimha 
Authored: Tue Apr 11 22:42:08 2017 +0530
Committer: Inigo 
Committed: Mon Apr 17 11:17:01 2017 -0700

--
 .../resourcemanager/recovery/FileSystemRMStateStore.java | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d33e515/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 929e2da..9591945 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -785,7 +785,7 @@ public class FileSystemRMStateStore extends RMStateStore {
* atomic for underlying file system.
*/
   protected void writeFile(Path outputPath, byte[] data, boolean
-  makeUnradableByAdmin) throws Exception {
+  makeUnreadableByAdmin) throws Exception {
 Path tempPath =
 new Path(outputPath.getParent(), outputPath.getName() + ".tmp");
 FSDataOutputStream fsOut = null;
@@ -793,7 +793,7 @@ public class FileSystemRMStateStore extends RMStateStore {
 // final status.
 try {
   fsOut = fs.create(tempPath, true);
-  if (makeUnradableByAdmin) {
+  if (makeUnreadableByAdmin) {
 setUnreadableBySuperuserXattrib(tempPath);
   }
   fsOut.write(data);
@@ -811,10 +811,10 @@ public class FileSystemRMStateStore extends RMStateStore {
* atomic for underlying file system.
*/
   protected void updateFile(Path outputPath, byte[] data, boolean
-  makeUnradableByAdmin) throws Exception {
+  makeUnreadableByAdmin) throws Exception {
 Path newPath = new Path(outputPath.getParent(), outputPath.getName() + 
".new");
 // use writeFileWithRetries to make sure .new file is created atomically
-writeFileWithRetries(newPath, data, makeUnradableByAdmin);
+writeFileWithRetries(newPath, data, makeUnreadableByAdmin);
 replaceFile(newPath, outputPath);
   }
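Beyond the parameter rename, writeFile() and updateFile() above illustrate the write-to-temp-then-rename idiom that keeps state-store updates atomic. A minimal sketch of the same idiom in plain java.nio, independent of the Hadoop FileSystem API shown here:

// Readers never observe a half-written file: data lands in a temp file
// first and is then renamed into place (atomic on POSIX file systems).
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class AtomicWriteDemo {
  static void writeAtomically(Path target, byte[] data) throws IOException {
    Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
    Files.write(tmp, data);
    Files.move(tmp, target,
        StandardCopyOption.REPLACE_EXISTING,
        StandardCopyOption.ATOMIC_MOVE);
  }

  public static void main(String[] args) throws IOException {
    writeAtomically(Paths.get("demo.txt"), "hello".getBytes());
  }
}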
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/45] hadoop git commit: YARN-6368. Decommissioning an NM results in a -1 exit code (miklos.szeg...@cloudera.com via rkanter)

2017-04-17 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 0e4661f7f -> 01e6d57d1


YARN-6368. Decommissioning an NM results in a -1 exit code 
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f1f8c6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f1f8c6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f1f8c6a

Branch: refs/heads/HDFS-10467
Commit: 1f1f8c6adb784078b263948c72b1afe2ce5e4e72
Parents: 7b09362
Author: Robert Kanter 
Authored: Fri Apr 7 14:28:07 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:58 2017 -0700

--
 .../yarn/server/nodemanager/NodeManager.java| 26 +---
 .../nodemanager/TestNodeManagerResync.java  |  2 +-
 2 files changed, 23 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f1f8c6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 60bd00b..1cff53f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -83,6 +83,24 @@ public class NodeManager extends CompositeService
implements EventHandler<NodeManagerEvent> {
 
   /**
+   * Node manager return status codes.
+   */
+  public enum NodeManagerStatus {
+NO_ERROR(0),
+EXCEPTION(1);
+
+private int exitCode;
+
+NodeManagerStatus(int exitCode) {
+  this.exitCode = exitCode;
+}
+
+public int getExitCode() {
+  return exitCode;
+}
+  }
+
+  /**
* Priority of the NodeManager shutdown hook.
*/
   public static final int SHUTDOWN_HOOK_PRIORITY = 30;
@@ -421,7 +439,7 @@ public class NodeManager extends CompositeService
 return "NodeManager";
   }
 
-  protected void shutDown() {
+  protected void shutDown(final int exitCode) {
 new Thread() {
   @Override
   public void run() {
@@ -432,7 +450,7 @@ public class NodeManager extends CompositeService
 } finally {
   if (shouldExitOnShutdownEvent
   && !ShutdownHookManager.get().isShutdownInProgress()) {
-ExitUtil.terminate(-1);
+ExitUtil.terminate(exitCode);
   }
 }
   }
@@ -457,7 +475,7 @@ public class NodeManager extends CompositeService
 .rebootNodeStatusUpdaterAndRegisterWithRM();
 } catch (YarnRuntimeException e) {
   LOG.fatal("Error while rebooting NodeStatusUpdater.", e);
-  shutDown();
+  shutDown(NodeManagerStatus.EXCEPTION.getExitCode());
 }
   }
 }.start();
@@ -744,7 +762,7 @@ public class NodeManager extends CompositeService
   public void handle(NodeManagerEvent event) {
 switch (event.getType()) {
 case SHUTDOWN:
-  shutDown();
+  shutDown(NodeManagerStatus.NO_ERROR.getExitCode());
   break;
 case RESYNC:
   resyncWithRM();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f1f8c6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 04cfae9..5ab5c37 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -638,7 +638,7 @@ public class TestNodeManagerResync {
 }
 
 @Override
-protected void shutDown() {
+protected void shutDown(int exitCode) {
   synchronized (isNMShutdownCalled) {
 

[10/45] hadoop git commit: HADOOP-14285. Update minimum version of Maven from 3.0 to 3.3.

2017-04-17 Thread inigoiri
HADOOP-14285. Update minimum version of Maven from 3.0 to 3.3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ea81047
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ea81047
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ea81047

Branch: refs/heads/HDFS-10467
Commit: 7ea81047d16736ae0b83d9eee4dcc843ace83a6d
Parents: f1c3ece
Author: Akira Ajisaka 
Authored: Sat Apr 8 14:23:26 2017 +0900
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 BUILDING.txt  |  2 +-
 dev-support/docker/Dockerfile | 16 ++--
 hadoop-project/pom.xml|  2 +-
 3 files changed, 12 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ea81047/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 5d331d4..57dad32 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -5,7 +5,7 @@ Requirements:
 
 * Unix System
 * JDK 1.8+
-* Maven 3.0 or later
+* Maven 3.3 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ea81047/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index e3e7810..a135c61 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -81,13 +81,17 @@ RUN apt-get -q install --no-install-recommends -y 
oracle-java8-installer
 
 # Apps that require Java
 ###
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
-ant \
-maven
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y ant
 
-# Fixing the Apache commons / Maven dependency problem under Ubuntu:
-# See http://wiki.apache.org/commons/VfsProblems
-RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
+##
+# Install Apache Maven
+##
+RUN mkdir -p /opt/maven && \
+curl -L -s -S \
+ 
http://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz
 \
+ -o /opt/maven.tar.gz && \
+tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
+ENV MAVEN_HOME /opt/maven
 
 ##
 # Install findbugs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ea81047/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f327933..bf93f0f 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -103,7 +103,7 @@
 for an open-ended enforcement
 -->
    <enforced.java.version>[${javac.version},)</enforced.java.version>
-    <enforced.maven.version>[3.0.2,)</enforced.maven.version>
+    <enforced.maven.version>[3.3.0,)</enforced.maven.version>
 
 
 -Xmx2048m 
-XX:+HeapDumpOnOutOfMemoryError


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/45] hadoop git commit: HDFS-11645. DataXceiver thread should log the actual error when getting InvalidMagicNumberException. Contributed by Chen Liang.

2017-04-17 Thread inigoiri
HDFS-11645. DataXceiver thread should log the actual error when getting 
InvalidMagicNumberException. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7608d5c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7608d5c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7608d5c8

Branch: refs/heads/HDFS-10467
Commit: 7608d5c878b5104c1a23f6fa0f2d06da1cb5c9f8
Parents: 88cf9b9
Author: Anu Engineer 
Authored: Wed Apr 12 11:40:58 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:02 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7608d5c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index f838fd9..706d93a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -241,12 +241,12 @@ class DataXceiver extends Receiver implements Runnable {
   LOG.info("Failed to read expected encryption handshake from client " 
+
   "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
   "is running an older version of Hadoop which does not support " +
-  "encryption");
+  "encryption", imne);
 } else {
   LOG.info("Failed to read expected SASL data transfer protection " +
   "handshake from client at " + peer.getRemoteAddressString() + 
   ". Perhaps the client is running an older version of Hadoop " +
-  "which does not support SASL data transfer protection");
+  "which does not support SASL data transfer protection", imne);
 }
 return;
   }
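The substance of the change is passing the caught InvalidMagicNumberException as the final logger argument, which makes the logging framework print its stack trace instead of silently dropping the root cause. A tiny standalone sketch of the same pattern (the class, message, and triggering exception here are illustrative; commons-logging is assumed on the classpath):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LogThrowableDemo {
  private static final Log LOG = LogFactory.getLog(LogThrowableDemo.class);

  static void handshake() {
    try {
      throw new java.io.IOException("bad magic number");
    } catch (java.io.IOException e) {
      // Before: logging the message alone lost the cause entirely.
      // After: the throwable rides along and its stack trace is logged.
      LOG.info("Failed to read expected encryption handshake", e);
    }
  }

  public static void main(String[] args) {
    handshake();
  }
}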


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/45] hadoop git commit: HDFS-11648. Lazy construct the IIP pathname. Contributed by Daryn Sharp.

2017-04-17 Thread inigoiri
HDFS-11648. Lazy construct the IIP pathname. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88cf9b9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88cf9b9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88cf9b9a

Branch: refs/heads/HDFS-10467
Commit: 88cf9b9a715b7d38e7b140912b1ddb82bfd81a4d
Parents: 5b2e824
Author: Kihwal Lee 
Authored: Wed Apr 12 13:29:24 2017 -0500
Committer: Inigo 
Committed: Mon Apr 17 11:17:02 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/INodesInPath.java   | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cf9b9a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index b37321d..1d5dbf6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -238,7 +238,7 @@ public class INodesInPath {
   }
 
   private final byte[][] path;
-  private final String pathname;
+  private volatile String pathname;
 
   /**
* Array with the specified number of INodes resolved for a given path.
@@ -268,7 +268,6 @@ public class INodesInPath {
 Preconditions.checkArgument(inodes != null && path != null);
 this.inodes = inodes;
 this.path = path;
-this.pathname = DFSUtil.byteArray2PathString(path);
 this.isRaw = isRaw;
 this.isSnapshot = isSnapshot;
 this.snapshotId = snapshotId;
@@ -329,6 +328,9 @@ public class INodesInPath {
 
   /** @return the full path in string form */
   public String getPath() {
+if (pathname == null) {
+  pathname = DFSUtil.byteArray2PathString(path);
+}
 return pathname;
   }
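This is the "racy single-check" lazy-initialization idiom: the field is volatile and the computation is deterministic, so two racing threads may both build the string, but they build identical values and callers never see a partially constructed result. A minimal sketch under those assumptions (toPathString() stands in for DFSUtil.byteArray2PathString):

import java.nio.charset.StandardCharsets;

public class LazyPathDemo {
  private final byte[][] components;
  private volatile String pathname; // built on first getPath() call

  LazyPathDemo(byte[][] components) {
    this.components = components;
  }

  public String getPath() {
    if (pathname == null) {
      pathname = toPathString(components); // benign race: idempotent
    }
    return pathname;
  }

  private static String toPathString(byte[][] parts) {
    StringBuilder sb = new StringBuilder();
    for (byte[] p : parts) {
      sb.append('/').append(new String(p, StandardCharsets.UTF_8));
    }
    return sb.length() == 0 ? "/" : sb.toString();
  }

  public static void main(String[] args) {
    LazyPathDemo p = new LazyPathDemo(new byte[][] {
        "a".getBytes(StandardCharsets.UTF_8),
        "b".getBytes(StandardCharsets.UTF_8)});
    System.out.println(p.getPath()); // "/a/b", computed once and cached
  }
}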
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/45] hadoop git commit: HDFS-11637. Fix javac warning caused by the deprecated key used in TestDFSClientRetries#testFailuresArePerOperation. Contributed by Yiqun Lin.

2017-04-17 Thread inigoiri
HDFS-11637. Fix javac warning caused by the deprecated key used in 
TestDFSClientRetries#testFailuresArePerOperation. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3eeac570
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3eeac570
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3eeac570

Branch: refs/heads/HDFS-10467
Commit: 3eeac57076202d536a6c16ae89bc92c782a527d3
Parents: 2b6160e
Author: Yiqun Lin 
Authored: Tue Apr 11 19:40:09 2017 +0800
Committer: Inigo 
Committed: Mon Apr 17 11:17:00 2017 -0700

--
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3eeac570/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 6db70d5..bb5a8d8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -370,7 +370,7 @@ public class TestDFSClientRetries {
 String file1 = "/testFile1";
 String file2 = "/testFile2";
 // Set short retry timeouts so this test runs faster
-conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
 conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 try {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/45] hadoop git commit: YARN-6343. Docker docs MR example is broken (Contributed by Prashant Jha via Daniel Templeton)

2017-04-17 Thread inigoiri
YARN-6343. Docker docs MR example is broken (Contributed by Prashant Jha via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/954f7f5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/954f7f5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/954f7f5e

Branch: refs/heads/HDFS-10467
Commit: 954f7f5ed596496b44c7e463e5d6187a1d6d
Parents: 29efc15
Author: Daniel Templeton 
Authored: Mon Apr 10 09:59:53 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 .../hadoop-yarn-site/src/site/markdown/DockerContainers.md   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/954f7f5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index e66d079..4de0a6a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -269,8 +269,8 @@ To submit the pi job to run in Docker containers, run the 
following commands:
 
 ```
 
vars="YARN_CONTAINER_RUNTIME_TYPE=docker,YARN_CONTAINER_RUNTIME_DOCKER_IMAGE=hadoop-docker"
-hadoop jar hadoop-examples.jar -Dyarn.app.mapreduce.am.env=$vars \
--Dmapreduce.map.env=$vars -Dmapreduce.reduce.env=$vars pi 10 100
+hadoop jar hadoop-examples.jar pi -Dyarn.app.mapreduce.am.env=$vars \
+-Dmapreduce.map.env=$vars -Dmapreduce.reduce.env=$vars 10 100
 ```
 
 Note that the application master, map tasks, and reduce tasks are configured
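The reordering works because the examples jar dispatches on its first argument (the program name), and only the selected program, run through ToolRunner, parses the generic -D options; with the flags first, the driver tried to interpret a -D flag as a program name and failed. A hedged sketch of that parsing order (PiLikeTool is illustrative and assumes hadoop-common on the classpath; Tool, Configured, Configuration, and ToolRunner are the real Hadoop classes):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class PiLikeTool extends Configured implements Tool {
  @Override
  public int run(String[] args) {
    // -D key=value pairs were already consumed by ToolRunner, so the
    // configuration is populated before run() sees the remaining args.
    Configuration conf = getConf();
    System.out.println(conf.get("yarn.app.mapreduce.am.env"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // e.g. args = {"-Dyarn.app.mapreduce.am.env=...", "10", "100"}
    System.exit(ToolRunner.run(new Configuration(), new PiLikeTool(), args));
  }
}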


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/45] hadoop git commit: YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. Contributed by Eric Badger

2017-04-17 Thread inigoiri
YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b326822c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b326822c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b326822c

Branch: refs/heads/HDFS-10467
Commit: b326822cc2e74ed7bf8f14503fddad56ec775367
Parents: f592346
Author: Eric Payne 
Authored: Fri Apr 14 10:53:09 2017 -0500
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../scheduler/capacity/TestCapacityScheduler.java| 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b326822c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 447ee3d..bf1f6eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -40,6 +40,7 @@ import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +49,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -3626,7 +3628,7 @@ public class TestCapacityScheduler {
 Assert.assertEquals(queueInfoB.getDefaultNodeLabelExpression(), "y");
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testAMLimitUsage() throws Exception {
 
 CapacitySchedulerConfiguration config =
@@ -3754,7 +3756,8 @@ public class TestCapacityScheduler {
   private void verifyAMLimitForLeafQueue(CapacitySchedulerConfiguration config)
   throws Exception {
 MockRM rm = setUpMove(config);
-rm.registerNode("127.0.0.1:1234", 2 * GB);
+int nodeMemory = 4 * GB;
+rm.registerNode("127.0.0.1:1234", nodeMemory);
 
 String queueName = "a1";
 String userName = "user_0";
@@ -3770,6 +3773,14 @@ public class TestCapacityScheduler {
 Resource.newInstance(amResourceLimit.getMemorySize() + 2048,
 amResourceLimit.getVirtualCores() + 1);
 
+// Wait for the scheduler to be updated with new node capacity
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+@Override
+public Boolean get() {
+  return scheduler.getMaximumResourceCapability().getMemorySize() == 
nodeMemory;
+}
+  }, 100, 60 * 1000);
+
 rm.submitApp(amResource1, "app-1", userName, null, queueName);
 
 rm.submitApp(amResource2, "app-2", userName, null, queueName);
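The flake fix replaces a fixed timing assumption with polling: the test now waits until the scheduler has actually absorbed the registered node's capacity before submitting applications. A self-contained sketch of the poll-until-true idiom that GenericTestUtils.waitFor provides (the helper below is a hypothetical stand-in, not the Hadoop API):

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public class WaitForDemo {
  static void waitFor(BooleanSupplier check, long intervalMs, long timeoutMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!check.getAsBoolean()) {   // re-test the condition...
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("condition not met in " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);       // ...instead of assuming timing
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    waitFor(() -> System.currentTimeMillis() - start > 200, 50, 5000);
    System.out.println("condition met");
  }
}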


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/45] hadoop git commit: HADOOP-14202. fix jsvc/secure user var inconsistencies

2017-04-17 Thread inigoiri
HADOOP-14202. fix jsvc/secure user var inconsistencies

Signed-off-by: John Zhuge 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5efa8d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5efa8d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5efa8d1

Branch: refs/heads/HDFS-10467
Commit: f5efa8d105b28d7bb1c2ec90b1abd1cce95f8bd1
Parents: e51fb98
Author: Allen Wittenauer 
Authored: Wed Mar 29 09:56:25 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:58 2017 -0700

--
 .../hadoop-common/src/main/bin/hadoop   |  69 +-
 .../hadoop-common/src/main/bin/hadoop-config.sh |  10 +-
 .../src/main/bin/hadoop-functions.sh| 217 +++
 .../hadoop-common/src/main/conf/hadoop-env.sh   |  13 +-
 .../src/site/markdown/SecureMode.md |  20 +-
 .../src/site/markdown/UnixShellGuide.md |  31 ++-
 .../scripts/hadoop_build_custom_subcmd_var.bats |  21 ++
 .../test/scripts/hadoop_detect_priv_subcmd.bats |  34 +++
 .../test/scripts/hadoop_get_verify_uservar.bats |  21 --
 .../src/test/scripts/hadoop_verify_user.bats|  53 -
 .../test/scripts/hadoop_verify_user_perm.bats   |  53 +
 .../scripts/hadoop_verify_user_resolves.bats|  44 
 .../hadoop-hdfs/src/main/bin/hdfs   |  97 +
 .../hadoop-hdfs/src/main/bin/hdfs-config.sh |   5 +
 .../src/site/markdown/HdfsNfsGateway.md |  14 +-
 hadoop-mapreduce-project/bin/mapred |  64 +-
 hadoop-mapreduce-project/bin/mapred-config.sh   |   1 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  62 +-
 .../hadoop-yarn/bin/yarn-config.sh  |   1 +
 19 files changed, 418 insertions(+), 412 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5efa8d1/hadoop-common-project/hadoop-common/src/main/bin/hadoop
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 145e348..70f66a5 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -67,13 +67,11 @@ function hadoopcmd_case
   hadoop_error ""
   #try to locate hdfs and if present, delegate to it.
   if [[ -f "${HADOOP_HDFS_HOME}/bin/hdfs" ]]; then
-# shellcheck disable=SC2086
 exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
---config "${HADOOP_CONF_DIR}" "${subcmd}"  "$@"
+  --config "${HADOOP_CONF_DIR}" "${subcmd}"  "$@"
   elif [[ -f "${HADOOP_HOME}/bin/hdfs" ]]; then
-# shellcheck disable=SC2086
 exec "${HADOOP_HOME}/bin/hdfs" \
---config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
+  --config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
   else
 hadoop_error "HADOOP_HDFS_HOME not found!"
 exit 1
@@ -174,9 +172,9 @@ else
 fi
 
 HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
 if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  # shellcheck 
source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
   . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
 else
   echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
@@ -201,7 +199,7 @@ if hadoop_need_reexec hadoop "${HADOOP_SUBCMD}"; then
   exit $?
 fi
 
-hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
 
 HADOOP_SUBCMD_ARGS=("$@")
 
@@ -221,60 +219,5 @@ fi
 
 hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
 
-if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
-  HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
-
-  hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
-
-  hadoop_verify_secure_prereq
-  hadoop_setup_secure_service
-  
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
-  
priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
-  
priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
-  
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
-  
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
-else
-  
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
-  
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
-fi
-
-if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
-  # 

[17/45] hadoop git commit: HADOOP-13545. Update HSQLDB to 2.3.4. Contributed by Giovanni Matteo Fumarola.

2017-04-17 Thread inigoiri
HADOOP-13545. Update HSQLDB to 2.3.4. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b6160ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b6160ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b6160ee

Branch: refs/heads/HDFS-10467
Commit: 2b6160eea6c8c37b6a7340466bfaf04b4905e682
Parents: a728829
Author: Akira Ajisaka 
Authored: Tue Apr 11 14:29:48 2017 +0900
Committer: Inigo 
Committed: Mon Apr 17 11:17:00 2017 -0700

--
 LICENSE.txt| 2 +-
 hadoop-project/pom.xml | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6160ee/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 10af7ce..969708f 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -736,7 +736,7 @@ 
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3-LICENSE
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-HSQLDB Database 2.0.0
+HSQLDB Database 2.3.4
 

 (HSQL License)
 "COPYRIGHTS AND LICENSES (based on BSD License)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b6160ee/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8a0e7eb..6d4acba 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -131,6 +131,7 @@
 1.0-alpha-8
 900
 1.11.86
+    <hsqldb.version>2.3.4</hsqldb.version>
 
 ${project.version}
@@ -1038,7 +1039,7 @@
   
 org.hsqldb
 hsqldb
-2.0.0
+${hsqldb.version}
   
   
 com.codahale.metrics


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/45] hadoop git commit: YARN-6439. Fix ReservationSystem creation of default ReservationQueue. (Carlo Curino via wangda)

2017-04-17 Thread inigoiri
YARN-6439. Fix ReservationSystem creation of default ReservationQueue. (Carlo 
Curino via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b286228
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b286228
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b286228

Branch: refs/heads/HDFS-10467
Commit: 8b2862283767b031d1e17e3db812fb9cdad552ad
Parents: 7d33e51
Author: Wangda Tan 
Authored: Tue Apr 11 14:56:18 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:01 2017 -0700

--
 .../capacity/CapacitySchedulerQueueManager.java | 20 
 .../scheduler/capacity/PlanQueue.java   |  4 ++--
 .../TestCapacitySchedulerPlanFollower.java  |  7 +++
 .../TestFairSchedulerPlanFollower.java  |  4 
 .../TestSchedulerPlanFollowerBase.java  |  9 -
 .../TestCapacitySchedulerDynamicBehavior.java   | 11 +++
 6 files changed, 52 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b286228/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index c92c343..be6243d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -41,10 +41,13 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.Permission;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueStateManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerQueueManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -220,6 +223,23 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<CSQueue>
 queue =
 new PlanQueue(csContext, queueName, parent,
 oldQueues.get(queueName));
+
+//initializing the "internal" default queue, for SLS compatibility
+String defReservationId =
+queueName + ReservationConstants.DEFAULT_QUEUE_SUFFIX;
+
+List<CSQueue> childQueues = new ArrayList<>();
+ReservationQueue resQueue = new ReservationQueue(csContext,
+defReservationId, (PlanQueue) queue);
+try {
+  resQueue.setEntitlement(new QueueEntitlement(1.0f, 1.0f));
+} catch (SchedulerDynamicEditException e) {
+  throw new IllegalStateException(e);
+}
+childQueues.add(resQueue);
+((PlanQueue) queue).setChildQueues(childQueues);
+queues.put(defReservationId, resQueue);
+
   } else {
 queue =
 new LeafQueue(csContext, queueName, parent,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b286228/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PlanQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PlanQueue.java
 

[29/45] hadoop git commit: HADOOP-14246. Authentication Tokens should use SecureRandom instead of Random and 256 bit secrets (Contributed by Robert Kanter via Daniel Templeton)

2017-04-17 Thread inigoiri
HADOOP-14246. Authentication Tokens should use SecureRandom instead of Random 
and 256 bit secrets
(Contributed by Robert Kanter via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b2e824c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b2e824c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b2e824c

Branch: refs/heads/HDFS-10467
Commit: 5b2e824cf71c497eafbac674d2f6922bc59d3bd9
Parents: a631172
Author: Daniel Templeton 
Authored: Wed Apr 12 11:17:31 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:02 2017 -0700

--
 .../util/RandomSignerSecretProvider.java|   9 +-
 .../util/ZKSignerSecretProvider.java|  10 +-
 .../util/TestRandomSignerSecretProvider.java|  68 ++--
 .../util/TestZKSignerSecretProvider.java| 154 ---
 4 files changed, 205 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b2e824c/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
index 41059a7..9245887 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
@@ -15,8 +15,9 @@ package org.apache.hadoop.security.authentication.util;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import java.nio.charset.Charset;
+import java.security.SecureRandom;
 import java.util.Random;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -32,7 +33,7 @@ public class RandomSignerSecretProvider extends 
RolloverSignerSecretProvider {
 
   public RandomSignerSecretProvider() {
 super();
-rand = new Random();
+rand = new SecureRandom();
   }
 
   /**
@@ -48,6 +49,8 @@ public class RandomSignerSecretProvider extends 
RolloverSignerSecretProvider {
 
   @Override
   protected byte[] generateNewSecret() {
-return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
+byte[] secret = new byte[32]; // 32 bytes = 256 bits
+rand.nextBytes(secret);
+return secret;
   }
 }
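
Why this matters, as a minimal side-by-side sketch (not from the patch itself; assumes only java.util and java.security):

    import java.nio.charset.StandardCharsets;
    import java.security.SecureRandom;
    import java.util.Random;

    // Old scheme: java.util.Random is not cryptographically secure, and the
    // decimal string of a single long carries at most 64 bits of entropy.
    byte[] weak = Long.toString(new Random().nextLong())
        .getBytes(StandardCharsets.UTF_8);

    // New scheme: a CSPRNG filling 32 bytes yields a full 256-bit secret.
    byte[] strong = new byte[32];
    new SecureRandom().nextBytes(strong);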

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b2e824c/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
index 48dfaaa..a7fc76f 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
@@ -16,6 +16,7 @@ package org.apache.hadoop.security.authentication.util;
 import com.google.common.annotations.VisibleForTesting;
 import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
+import java.security.SecureRandom;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -149,7 +150,7 @@ public class ZKSignerSecretProvider extends 
RolloverSignerSecretProvider {
 
   public ZKSignerSecretProvider() {
 super();
-rand = new Random();
+rand = new SecureRandom();
   }
 
   /**
@@ -342,8 +343,11 @@ public class ZKSignerSecretProvider extends 
RolloverSignerSecretProvider {
 }
   }
 
-  private byte[] generateRandomSecret() {
-return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
+  @VisibleForTesting
+  protected byte[] generateRandomSecret() {
+byte[] secret = new byte[32]; // 32 bytes = 256 bits
+rand.nextBytes(secret);
+return secret;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b2e824c/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
--
diff --git 

[20/45] hadoop git commit: YARN-6372. Add default value for NM disk validator (Contributed by Yufei Gu via Daniel Templeton)

2017-04-17 Thread inigoiri
YARN-6372. Add default value for NM disk validator (Contributed by Yufei Gu via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/908cf411
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/908cf411
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/908cf411

Branch: refs/heads/HDFS-10467
Commit: 908cf41177ac720f0c09b5286326ece1d7827498
Parents: 613fc92
Author: Daniel Templeton 
Authored: Mon Apr 10 14:55:22 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:00 2017 -0700

--
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java  | 3 ++-
 .../hadoop/yarn/server/nodemanager/DirectoryCollection.java   | 3 ++-
 .../containermanager/localizer/ContainerLocalizer.java| 3 ++-
 .../containermanager/localizer/ResourceLocalizationService.java   | 3 ++-
 4 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/908cf411/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 81cb8c6..fa4d2e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ActiveStandbyElector;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.BasicDiskValidator;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 
@@ -974,7 +975,7 @@ public class YarnConfiguration extends Configuration {
 
   /** Disk Validator. */
   public static final String DISK_VALIDATOR = NM_PREFIX + "disk-validator";
-  public static final String DEFAULT_DISK_VALIDATOR = "basic";
+  public static final String DEFAULT_DISK_VALIDATOR = BasicDiskValidator.NAME;
 
   /**
 * Maximum size of container's diagnostics to keep for relaunching container

http://git-wip-us.apache.org/repos/asf/hadoop/blob/908cf411/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index 72c32e8..ae2a4ef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -181,7 +181,8 @@ public class DirectoryCollection {
 conf = new YarnConfiguration();
 try {
   diskValidator = DiskValidatorFactory.getInstance(
-  conf.get(YarnConfiguration.DISK_VALIDATOR));
+  conf.get(YarnConfiguration.DISK_VALIDATOR,
+  YarnConfiguration.DEFAULT_DISK_VALIDATOR));
   LOG.info("Disk Validator: " + YarnConfiguration.DISK_VALIDATOR +
   " is loaded.");
 } catch (Exception e) {
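
The same lookup pattern, as a standalone sketch (assumes only the APIs already referenced in the diff):

    // conf.get(key) returns null when yarn.nodemanager.disk-validator is
    // unset; passing the default guarantees DiskValidatorFactory always
    // receives a valid validator name ("basic").
    Configuration conf = new YarnConfiguration();
    DiskValidator validator = DiskValidatorFactory.getInstance(
        conf.get(YarnConfiguration.DISK_VALIDATOR,
            YarnConfiguration.DEFAULT_DISK_VALIDATOR));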

http://git-wip-us.apache.org/repos/asf/hadoop/blob/908cf411/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java
index 959092b..6e79857 100644
--- 

[03/45] hadoop git commit: HADOOP-14174. Set default ADLS access token provider type to ClientCredential. Contributed by John Zhuge.

2017-04-17 Thread inigoiri
HADOOP-14174. Set default ADLS access token provider type to ClientCredential. 
Contributed by John Zhuge.

Signed-off-by: John Zhuge 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/224295f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/224295f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/224295f4

Branch: refs/heads/HDFS-10467
Commit: 224295f47ee61a6a351f74cd5d1913ebe97d2886
Parents: f5efa8d
Author: John Zhuge 
Authored: Tue Mar 28 09:40:14 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:58 2017 -0700

--
 .../src/main/resources/core-default.xml | 63 
 .../conf/TestCommonConfigurationFields.java |  2 +-
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  2 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  3 +-
 .../apache/hadoop/fs/adl/AdlMockWebServer.java  |  3 +
 .../hadoop/fs/adl/TestAzureADTokenProvider.java |  3 +
 .../hadoop/fs/adl/TestCustomTokenProvider.java  |  4 ++
 .../fs/adl/TestRelativePathFormation.java   |  4 ++
 8 files changed, 82 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/224295f4/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b8f9904..521b013 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2456,6 +2456,7 @@
 
 
   
+
   <property>
     <name>fs.adl.impl</name>
     <value>org.apache.hadoop.fs.adl.AdlFileSystem</value>
@@ -2465,6 +2466,68 @@
     <name>fs.AbstractFileSystem.adl.impl</name>
     <value>org.apache.hadoop.fs.adl.Adl</value>
   </property>
+
+  <property>
+    <name>adl.feature.ownerandgroup.enableupn</name>
+    <value>false</value>
+    <description>
+      When true : User and Group in FileStatus/AclStatus response is
+      represented as user friendly name as per Azure AD profile.
+
+      When false (default) : User and Group in FileStatus/AclStatus
+      response is represented by the unique identifier from Azure AD
+      profile (Object ID as GUID).
+
+      For optimal performance, false is recommended.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.access.token.provider.type</name>
+    <value>ClientCredential</value>
+    <description>
+      Defines Azure Active Directory OAuth2 access token provider type.
+      Supported types are ClientCredential, RefreshToken, and Custom.
+      The ClientCredential type requires property fs.adl.oauth2.client.id,
+      fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
+      The RefreshToken type requires property fs.adl.oauth2.client.id and
+      fs.adl.oauth2.refresh.token.
+      The Custom type requires property fs.adl.oauth2.access.token.provider.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.client.id</name>
+    <value></value>
+    <description>The OAuth2 client id.</description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.credential</name>
+    <value></value>
+    <description>The OAuth2 access key.</description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.refresh.url</name>
+    <value></value>
+    <description>The OAuth2 token endpoint.</description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.refresh.token</name>
+    <value></value>
+    <description>The OAuth2 refresh token.</description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.access.token.provider</name>
+    <value></value>
+    <description>
+      The class name of the OAuth2 access token provider.
+    </description>
+  </property>
+
   
 
   

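
A hedged client-side example of the ClientCredential default described above; the ids and endpoint below are placeholders, not values from this commit:

    // Hypothetical setup for an Azure Data Lake store.
    Configuration conf = new Configuration();
    // Optional now that ClientCredential is the default provider type:
    conf.set("fs.adl.oauth2.access.token.provider.type", "ClientCredential");
    conf.set("fs.adl.oauth2.client.id", "<application-id>");
    conf.set("fs.adl.oauth2.credential", "<client-secret>");
    conf.set("fs.adl.oauth2.refresh.url",
        "https://login.microsoftonline.com/<tenant-id>/oauth2/token");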
http://git-wip-us.apache.org/repos/asf/hadoop/blob/224295f4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index cbfb6d1..8524973 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -105,7 +105,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 // ADL properties are in a different subtree
 // - org.apache.hadoop.hdfs.web.ADLConfKeys
 xmlPrefixToSkipCompare.add("adl.");
-xmlPropsToSkipCompare.add("fs.adl.impl");
+xmlPrefixToSkipCompare.add("fs.adl.");
 xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl");
 
 // Azure properties are in a different class

http://git-wip-us.apache.org/repos/asf/hadoop/blob/224295f4/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 

[09/45] hadoop git commit: YARN-5153. Add a toggle button to switch between timeline view / table view for containers and application-attempts in new YARN UI. Contributed by Akhil PB.

2017-04-17 Thread inigoiri
YARN-5153. Add a toggle button to switch between timeline view / table view for 
containers and application-attempts in new YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29efc156
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29efc156
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29efc156

Branch: refs/heads/HDFS-10467
Commit: 29efc156ce1891ffdd33ef44107d52fe4055b183
Parents: 77880cc
Author: Sunil G 
Authored: Mon Apr 10 13:35:08 2017 +0530
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 .../webapp/app/components/app-attempt-table.js  |   7 -
 .../main/webapp/app/components/timeline-view.js | 199 ++-
 .../main/webapp/app/helpers/prepend-protocol.js |  29 +++
 .../templates/components/app-attempt-table.hbs  |  36 ++--
 .../templates/components/container-table.hbs|  22 +-
 .../app/templates/components/timeline-view.hbs  |  61 --
 .../tests/unit/helpers/prepend-protocol-test.js |  28 +++
 7 files changed, 331 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29efc156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
index 3c43037..8828275 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-attempt-table.js
@@ -19,11 +19,4 @@
 import Ember from 'ember';
 
 export default Ember.Component.extend({
-  nodeHttpAddressFormatted: Ember.computed('attempt.nodeHttpAddress', 
function() {
-var nodeHttpAddress = this.get('attempt.nodeHttpAddress');
-if (nodeHttpAddress && nodeHttpAddress.indexOf('://') < 0) {
-  nodeHttpAddress = 'http://' + nodeHttpAddress;
-}
-return nodeHttpAddress;
-  })
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29efc156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
index d730a43..4a33d5b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
@@ -18,6 +18,7 @@
 
 import Ember from 'ember';
 import Converter from 'yarn-ui/utils/converter';
+import ColumnDef from 'em-table/utils/column-definition';
 
 export default Ember.Component.extend({
   canvas: {
@@ -31,6 +32,8 @@ export default Ember.Component.extend({
   modelArr: [],
   colors: d3.scale.category10().range(),
   _selected: undefined,
+  gridColumns: [],
+  gridRows: [],
 
   selected: function() {
 return this._selected;
@@ -276,5 +279,199 @@ export default Ember.Component.extend({
 if (this.modelArr.length > 0) {
   this.setSelected(this.modelArr[0]);
 }
+
+if (this.get('attemptModel')) {
+  this.setAttemptsGridColumnsAndRows();
+} else {
+  this.setContainersGridColumnsAndRows();
+}
+  },
+
+  setAttemptsGridColumnsAndRows: function() {
+var self = this;
+var columns = [];
+
+columns.push({
+  id: 'id',
+  headerTitle: 'Attempt ID',
+  contentPath: 'id',
+  cellComponentName: 'em-table-linked-cell',
+  minWidth: '300px',
+  getCellContent: function(row) {
+return {
+  displayText: row.get('id'),
+  routeName: 'yarn-app-attempt',
+  id: row.get('id')
+};
+  }
+}, {
+  id: 'attemptStartedTime',
+  headerTitle: 'Started Time',
+  contentPath: 'attemptStartedTime'
+}, {
+  id: 'finishedTime',
+  headerTitle: 'Finished Time',
+  contentPath: 'finishedTime',
+  getCellContent: function(row) {
+if (row.get('finishedTs')) {
+  return row.get('finishedTime');
+}
+return 'N/A';
+  }
+}, {
+  id: 'elapsedTime',
+  headerTitle: 'Elapsed Time',
+  contentPath: 'elapsedTime'
+}, {
+  id: 'appMasterContainerId',
+  headerTitle: 'AM Container ID',
+  contentPath: 'appMasterContainerId',
+  minWidth: '300px'
+}, {
+  id: 'amNodeId',
+  headerTitle: 'AM Node ID',

[34/45] hadoop git commit: YARN-6421. Upgrade frontend-maven-plugin to 1.1 to fix new YARN UI build error in ppc64le. Contributed by Sonia Garudi.

2017-04-17 Thread inigoiri
YARN-6421. Upgrade frontend-maven-plugin to 1.1 to fix new YARN UI build error 
in ppc64le. Contributed by Sonia Garudi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2f889b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2f889b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2f889b8

Branch: refs/heads/HDFS-10467
Commit: d2f889b89dc4f106d97686b5aa91a7bd84928a0c
Parents: 5c6d4a1
Author: Sunil G 
Authored: Wed Apr 12 12:01:47 2017 +0530
Committer: Inigo 
Committed: Mon Apr 17 11:17:02 2017 -0700

--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2f889b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 40eb54d..bec99cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -123,7 +123,7 @@
   
        <groupId>com.github.eirslett</groupId>
        <artifactId>frontend-maven-plugin</artifactId>
-       <version>0.0.22</version>
+       <version>1.1</version>
 
   ${webappTgtDir}
 
@@ -172,7 +172,7 @@
   ${webappTgtDir}
   ${node.executable}
   
-node/npm/bin/npm-cli
+node/node_modules/npm/bin/npm-cli
 run
 build:mvn
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/45] hadoop git commit: YARN-6480. Timeout is too aggressive for TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger

2017-04-17 Thread inigoiri
YARN-6480. Timeout is too aggressive for 
TestAMRestart.testPreemptedAMRestartOnRMRestart. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/242bb479
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/242bb479
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/242bb479

Branch: refs/heads/HDFS-10467
Commit: 242bb479ddd16b345792766476d2011f45102a0f
Parents: 859b3a0
Author: Jason Lowe 
Authored: Fri Apr 14 17:15:48 2017 -0500
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../server/resourcemanager/applicationsmanager/TestAMRestart.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/242bb479/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 4fa8287..f5da5b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -490,7 +490,7 @@ public class TestAMRestart {
   // Test RM restarts after AM container is preempted, new RM should not count
   // AM preemption failure towards the max-retry-account and should be able to
   // re-launch the AM.
-  @Test(timeout = 20000)
+  @Test(timeout = 60000)
   public void testPreemptedAMRestartOnRMRestart() throws Exception {
 YarnConfiguration conf = new YarnConfiguration();
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[42/45] hadoop git commit: HADOOP-14274. Azure: Simplify Ranger-WASB policy model. Contributed by Sivaguru Sankaridurg

2017-04-17 Thread inigoiri
HADOOP-14274. Azure: Simplify Ranger-WASB policy model. Contributed by Sivaguru 
Sankaridurg


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5923469
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5923469
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5923469

Branch: refs/heads/HDFS-10467
Commit: f5923469822150f4172e5ca5dddf7be3df382ece
Parents: ad54dcf
Author: Mingliang Liu 
Authored: Wed Apr 12 16:07:10 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 129 +++--
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  11 +-
 .../fs/azure/WasbAuthorizationOperations.java   |   2 -
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  22 +-
 .../TestNativeAzureFileSystemAuthorization.java | 554 ---
 .../fs/azure/TestWasbRemoteCallHelper.java  |   6 +-
 .../TestAzureFileSystemInstrumentation.java |   3 +-
 7 files changed, 603 insertions(+), 124 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5923469/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 5469944..e06522b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -1426,13 +1426,20 @@ public class NativeAzureFileSystem extends FileSystem {
 return store;
   }
 
-  private void performAuthCheck(String path, String accessType,
-  String operation) throws WasbAuthorizationException, IOException {
+  /**
+   * @param requestingAccessForPath - The path to the 
ancestor/parent/subtree/file that needs to be
+   *checked before granting access to 
originalPath
+   * @param accessType - The type of access READ/WRITE being requested
+   * @param operation - A string describing the operation being performed 
("delete", "create" etc.).
+   * @param originalPath - The originalPath that was being accessed
+   */
+  private void performAuthCheck(String requestingAccessForPath, 
WasbAuthorizationOperations accessType,
+  String operation, String originalPath) throws 
WasbAuthorizationException, IOException {
 
 if (azureAuthorization && this.authorizer != null &&
-!this.authorizer.authorize(path, accessType)) {
+!this.authorizer.authorize(requestingAccessForPath, 
accessType.toString())) {
   throw new WasbAuthorizationException(operation
-  + " operation for Path : " + path + " not allowed");
+  + " operation for Path : " + originalPath + " not allowed");
 }
   }
 
@@ -1459,8 +1466,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
 Path absolutePath = makeAbsolute(f);
 
-performAuthCheck(absolutePath.toString(),
-WasbAuthorizationOperations.WRITE.toString(), "append");
+performAuthCheck(absolutePath.toString(), 
WasbAuthorizationOperations.WRITE, "append", absolutePath.toString());
 
 String key = pathToKey(absolutePath);
 FileMetadata meta = null;
@@ -1663,9 +1669,9 @@ public class NativeAzureFileSystem extends FileSystem {
 }
 
 Path absolutePath = makeAbsolute(f);
+Path ancestor = getAncestor(absolutePath);
 
-performAuthCheck(absolutePath.toString(),
-WasbAuthorizationOperations.WRITE.toString(), "create");
+performAuthCheck(ancestor.toString(), WasbAuthorizationOperations.WRITE, 
"create", absolutePath.toString());
 
 String key = pathToKey(absolutePath);
 
@@ -1678,6 +1684,9 @@ public class NativeAzureFileSystem extends FileSystem {
   if (!overwrite) {
 throw new FileAlreadyExistsException("File already exists:" + f);
   }
+  else {
+performAuthCheck(absolutePath.toString(), 
WasbAuthorizationOperations.WRITE, "create", absolutePath.toString());
+  }
 }
 
 Path parentFolder = absolutePath.getParent();
@@ -1768,7 +1777,7 @@ public class NativeAzureFileSystem extends FileSystem {
 
   /**
* Delete the specified file or folder. The parameter
-   * skipParentFolderLastModifidedTimeUpdate
+   * skipParentFolderLastModifiedTimeUpdate
* is used in the case of atomic folder rename redo. In that case, there is
* a lease on the parent folder, so (without reworking the code) modifying
* the parent folder update time will fail because of a conflict with the
@@ -1778,20 +1787,20 @@ public class 

[43/45] hadoop git commit: MAPREDUCE-6673. Add a test example job that grows in memory usage over time (Karthik Kambatla via Haibo Chen)

2017-04-17 Thread inigoiri
MAPREDUCE-6673. Add a test example job that grows in memory usage over time 
(Karthik Kambatla via Haibo Chen)

Change-Id: Iccfc8c67c38c526cc61726d87bfcbcf69ac36fea


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01e6d57d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01e6d57d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01e6d57d

Branch: refs/heads/HDFS-10467
Commit: 01e6d57d1b4ae9c520fe1f0053d4da0e248fd3ff
Parents: 7cd0811
Author: Haibo Chen 
Authored: Fri Apr 14 17:33:04 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../hadoop/mapreduce/GrowingSleepJob.java   | 68 
 .../apache/hadoop/test/MapredTestDriver.java|  3 +
 2 files changed, 71 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01e6d57d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
new file mode 100644
index 000..55740f7
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A sleep job whose mappers create 1MB buffer for every record.
+ */
+public class GrowingSleepJob extends SleepJob {
+  private static final Log LOG = LogFactory.getLog(GrowingSleepJob.class);
+
+  public static class GrowingSleepMapper extends SleepMapper {
+private final int MB = 1024 * 1024;
+private ArrayList<byte[]> bytes = new ArrayList<>();
+
+@Override
+public void map(IntWritable key, IntWritable value, Context context)
+throws IOException, InterruptedException {
+  super.map(key, value, context);
+  long free = Runtime.getRuntime().freeMemory();
+  if (free > 32 * MB) {
+LOG.info("Free memory = " + free +
+" bytes. Creating 1 MB on the heap.");
+bytes.add(new byte[MB]);
+  }
+}
+  }
+
+  public static void main(String[] args) throws Exception {
+int res = ToolRunner.run(new Configuration(), new GrowingSleepJob(), args);
+System.exit(res);
+  }
+
+  @Override
+  public Job createJob(int numMapper, int numReducer,
+   long mapSleepTime, int mapSleepCount,
+   long reduceSleepTime, int reduceSleepCount)
+  throws IOException {
+Job job = super.createJob(numMapper, numReducer, mapSleepTime,
+mapSleepCount, reduceSleepTime, reduceSleepCount);
+job.setMapperClass(GrowingSleepMapper.class);
+job.setJobName("Growing sleep job");
+return job;
+  }
+}
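
A possible invocation once the MapredTestDriver entry below is in place; the alias "growsleep" and the jar name are assumptions based on the existing sleep job, not taken from this diff:

    // Hypothetical CLI run: four growing mappers, one reducer.
    // hadoop jar hadoop-mapreduce-client-jobclient-*-tests.jar growsleep \
    //     -m 4 -r 1 -mt 60000 -rt 1000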

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01e6d57d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
index 76198b8..a2a13d5 100644

[39/45] hadoop git commit: HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)

2017-04-17 Thread inigoiri
HADOOP-14311. Add python2.7-dev to Dockerfile (Allen Wittenauer via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cd0811b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cd0811b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cd0811b

Branch: refs/heads/HDFS-10467
Commit: 7cd0811b6402848b196d6a9bdf32c5ee780e92cc
Parents: 242bb47
Author: Arun Suresh 
Authored: Fri Apr 14 16:56:16 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd0811b/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index a135c61..f939b1d 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -58,6 +58,7 @@ RUN apt-get -q update && apt-get -q install 
--no-install-recommends -y \
 protobuf-c-compiler \
 python \
 python2.7 \
+python2.7-dev \
 python-pip \
 rsync \
 snappy \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[36/45] hadoop git commit: HADOOP-14255. S3A to delete unnecessary fake directory objects in mkdirs(). Contributed by Mingliang Liu

2017-04-17 Thread inigoiri
HADOOP-14255. S3A to delete unnecessary fake directory objects in mkdirs(). 
Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad54dcf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad54dcf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad54dcf3

Branch: refs/heads/HDFS-10467
Commit: ad54dcf37b04fcd3eaca2969f4f401876f2a68b9
Parents: b6a0d2d
Author: Mingliang Liu 
Authored: Thu Mar 30 13:03:34 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../fs/contract/AbstractContractMkdirTest.java  | 42 
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  3 +-
 2 files changed, 43 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad54dcf3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
index 427b0e9..71d2706 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
@@ -126,4 +126,46 @@ public abstract class AbstractContractMkdirTest extends 
AbstractFSContractTestBa
 assertPathExists("check path existence without trailing slash failed",
 path("testmkdir/b"));
   }
+
+  @Test
+  public void testMkdirsPopulatingAllNonexistentAncestors() throws IOException 
{
+describe("Verify mkdir will populate all its non-existent ancestors");
+final FileSystem fs = getFileSystem();
+
+final Path parent = path("testMkdirsPopulatingAllNonexistentAncestors");
+assertTrue(fs.mkdirs(parent));
+assertPathExists(parent + " should exist before making nested dir", 
parent);
+
+Path nested = path(parent + "/a/b/c/d/e/f/g/h/i/j/k/L");
+assertTrue(fs.mkdirs(nested));
+while (nested != null && !nested.equals(parent) && !nested.isRoot()) {
+  assertPathExists(nested + " nested dir should exist", nested);
+  nested = nested.getParent();
+}
+  }
+
+  @Test
+  public void testMkdirsDoesNotRemoveParentDirectories() throws IOException {
+describe("Verify mkdir will make its parent existent");
+final FileSystem fs = getFileSystem();
+
+final Path parent = path("testMkdirsDoesNotRemoveParentDirectories");
+assertTrue(fs.mkdirs(parent));
+
+Path p = parent;
+for (int i = 0; i < 10; i++) {
+  assertTrue(fs.mkdirs(p));
+  assertPathExists(p + " should exist after mkdir(" + p + ")", p);
+  p = path(p + "/dir-" + i);
+}
+
+// After mkdirs(sub-directory), its parent directory still exists
+p = p.getParent();
+while (p != null && !p.equals(parent) && !p.isRoot()) {
+  assertPathExists("Path " + p + " should exist", p);
+  assertIsDirectory(p);
+  p = p.getParent();
+}
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad54dcf3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index b17281b..9eb5575 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1525,8 +1525,6 @@ public class S3AFileSystem extends FileSystem {
* @throws IOException other IO problems
* @throws AmazonClientException on failures inside the AWS SDK
*/
-  // TODO: If we have created an empty file at /foo/bar and we then call
-  // mkdirs for /foo/bar/baz/roo what happens to the empty file /foo/bar/?
   private boolean innerMkdirs(Path f, FsPermission permission)
   throws IOException, FileAlreadyExistsException, AmazonClientException {
 LOG.debug("Making directory: {}", f);
@@ -1561,6 +1559,7 @@ public class S3AFileSystem extends FileSystem {
 
   String key = pathToKey(f);
   createFakeDirectory(key);
+  deleteUnnecessaryFakeDirectories(f.getParent());
   return true;
 }
   }
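
A short recap sketch of the invariant the added line enforces (simplified from the hunk above):

    // Once /a/b/c is materialized, fake directory markers for its
    // ancestors are redundant -- a descendant implies they exist -- so
    // they are pruned starting from the immediate parent.
    String key = pathToKey(f);                        // e.g. "a/b/c/"
    createFakeDirectory(key);
    deleteUnnecessaryFakeDirectories(f.getParent());  // drops "a/b/", "a/"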


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: 

[37/45] hadoop git commit: YARN-3760. FSDataOutputStream leak in AggregatedLogFormat.LogWriter.close(). Contributed by Haibo Chen.

2017-04-17 Thread inigoiri
YARN-3760. FSDataOutputStream leak in AggregatedLogFormat.LogWriter.close(). 
Contributed by Haibo Chen.

(cherry picked from commit c26ccf1adb3a72df3f68e1150b86b813c691203a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd1ec752
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd1ec752
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd1ec752

Branch: refs/heads/HDFS-10467
Commit: fd1ec7523bcc93cf8be0e510aefd7f9ad7507b99
Parents: 550b896
Author: Junping Du 
Authored: Wed Apr 12 09:34:34 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../hadoop/yarn/logaggregation/AggregatedLogFormat.java  | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd1ec752/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 8d86967..0aa318c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -541,14 +541,15 @@ public class AggregatedLogFormat {
 
 @Override
 public void close() {
-  if (writer != null) {
-try {
+  try {
+if (writer != null) {
   writer.close();
-} catch (IOException e) {
-  LOG.warn("Exception closing writer", e);
 }
+  } catch (Exception e) {
+LOG.warn("Exception closing writer", e);
+  } finally {
+IOUtils.closeStream(this.fsDataOStream);
   }
-  IOUtils.closeStream(fsDataOStream);
 }
   }
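
The leak scenario this reordering closes, sketched with the names from the diff:

    // Before: a RuntimeException from writer.close() escaped the
    // IOException-only catch, so the closeStream() call that followed the
    // try block never ran and fsDataOStream leaked. Catching Exception and
    // closing the underlying stream in finally guarantees release:
    try {
      if (writer != null) {
        writer.close();
      }
    } catch (Exception e) {
      LOG.warn("Exception closing writer", e);
    } finally {
      IOUtils.closeStream(this.fsDataOStream); // always executed
    }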
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/45] hadoop git commit: HADOOP-14248. Retire SharedInstanceProfileCredentialsProvider in trunk. Contributed by Mingliang Liu.

2017-04-17 Thread inigoiri
HADOOP-14248. Retire SharedInstanceProfileCredentialsProvider in trunk. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a631172e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a631172e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a631172e

Branch: refs/heads/HDFS-10467
Commit: a631172ed9ae5ee1bda809f602eaed68e00614fa
Parents: e2d6656
Author: Chris Nauroth 
Authored: Wed Apr 12 10:02:13 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:02 2017 -0700

--
 .../src/main/resources/core-default.xml |  9 +--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  8 +--
 ...haredInstanceProfileCredentialsProvider.java | 67 
 .../src/site/markdown/tools/hadoop-aws/index.md | 33 +++---
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  4 +-
 5 files changed, 13 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a631172e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 03e4996..4f37c65 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -955,13 +955,8 @@
 configuration of AWS access key ID and secret access key in
 environment variables named AWS_ACCESS_KEY_ID and
 AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK.
-3. org.apache.hadoop.fs.s3a.SharedInstanceProfileCredentialsProvider:
-a shared instance of
-com.amazonaws.auth.InstanceProfileCredentialsProvider from the AWS
-SDK, which supports use of instance profile credentials if running
-in an EC2 VM.  Using this shared instance potentially reduces load
-on the EC2 instance metadata service for multi-threaded
-applications.
+3. com.amazonaws.auth.InstanceProfileCredentialsProvider: supports use
+of instance profile credentials if running in an EC2 VM.
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a631172e/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 6a11699..5ff9321 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -339,15 +339,9 @@ public final class S3AUtils {
   credentials.add(new BasicAWSCredentialsProvider(
   creds.getUser(), creds.getPassword()));
   credentials.add(new EnvironmentVariableCredentialsProvider());
-  credentials.add(
-  SharedInstanceProfileCredentialsProvider.getInstance());
+  credentials.add(InstanceProfileCredentialsProvider.getInstance());
 } else {
   for (Class<?> aClass : awsClasses) {
-if (aClass == InstanceProfileCredentialsProvider.class) {
-  LOG.debug("Found {}, but will use {} instead.", aClass.getName(),
-  SharedInstanceProfileCredentialsProvider.class.getName());
-  aClass = SharedInstanceProfileCredentialsProvider.class;
-}
 credentials.add(createAWSCredentialProvider(conf, aClass));
   }
 }
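
Deployments that named the removed class explicitly should now reference the AWS SDK provider directly; a hedged migration example:

    // Hypothetical configuration; fs.s3a.aws.credentials.provider is the
    // standard S3A key for supplying a credential provider class list.
    Configuration conf = new Configuration();
    conf.set("fs.s3a.aws.credentials.provider",
        "com.amazonaws.auth.InstanceProfileCredentialsProvider");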

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a631172e/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
deleted file mode 100644
index cbc0787..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- 

[38/45] hadoop git commit: HDFS-10996. Ability to specify per-file EC policy at create time. Contributed by SammiChen.

2017-04-17 Thread inigoiri
HDFS-10996. Ability to specify per-file EC policy at create time. Contributed 
by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/550b8962
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/550b8962
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/550b8962

Branch: refs/heads/HDFS-10467
Commit: 550b896245846389b238ffb118da841a60e23119
Parents: fdb1cf4
Author: Andrew Wang 
Authored: Wed Apr 12 12:27:34 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 23 -
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  5 +-
 .../hadoop/hdfs/DistributedFileSystem.java  | 34 +---
 .../hadoop/hdfs/protocol/ClientProtocol.java|  6 ++-
 .../ClientNamenodeProtocolTranslatorPB.java |  5 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  3 +-
 .../server/namenode/FSDirErasureCodingOp.java   | 54 +---
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 17 --
 .../hdfs/server/namenode/FSNamesystem.java  | 16 +++---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 +--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |  3 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  3 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 45 
 .../apache/hadoop/hdfs/TestFileCreation.java|  2 +-
 .../java/org/apache/hadoop/hdfs/TestLease.java  |  3 +-
 .../server/namenode/NNThroughputBenchmark.java  | 16 +++---
 .../hdfs/server/namenode/TestAddBlockRetry.java |  4 +-
 ...stBlockPlacementPolicyRackFaultTolerant.java |  4 +-
 .../TestDefaultBlockPlacementPolicy.java|  2 +-
 .../server/namenode/TestNamenodeRetryCache.java | 17 +++---
 .../namenode/ha/TestRetryCacheWithHA.java   |  3 +-
 23 files changed, 200 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/550b8962/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 5bc38e8..ef49950 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1190,13 +1190,31 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   long blockSize, Progressable progress, int buffersize,
   ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes)
   throws IOException {
+return create(src, permission, flag, createParent, replication, blockSize,
+progress, buffersize, checksumOpt, favoredNodes, null);
+  }
+
+
+  /**
+   * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, 
long,
+   * Progressable, int, ChecksumOpt, InetSocketAddress[])} with the addition of
+   * ecPolicyName that is used to specify a specific erasure coding policy
+   * instead of inheriting any policy from this new file's parent directory.
+   * This policy will be persisted in HDFS. A value of null means inheriting
+   * whatever policy the parent directory has.
+   */
+  public DFSOutputStream create(String src, FsPermission permission,
+  EnumSet<CreateFlag> flag, boolean createParent, short replication,
+  long blockSize, Progressable progress, int buffersize,
+  ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes,
+  String ecPolicyName) throws IOException {
 checkOpen();
 final FsPermission masked = applyUMask(permission);
 LOG.debug("{}: masked={}", src, masked);
 final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
 src, masked, flag, createParent, replication, blockSize, progress,
 dfsClientConf.createChecksum(checksumOpt),
-getFavoredNodesStr(favoredNodes));
+getFavoredNodesStr(favoredNodes), ecPolicyName);
 beginFileLease(result.getFileId(), result);
 return result;
   }
@@ -1249,7 +1267,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (result == null) {
   DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
   result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
-  flag, createParent, replication, blockSize, progress, checksum, 
null);
+  flag, createParent, replication, blockSize, progress, checksum,
+  null, null);
 }
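
A hedged sketch of the new eleven-argument overload; dfsClient is assumed to be an initialized DFSClient, and the policy name stands in for any enabled system policy (null preserves the old inherit-from-parent behavior):

    DFSOutputStream out = dfsClient.create("/data/part-0",
        FsPermission.getFileDefault(), EnumSet.of(CreateFlag.CREATE),
        true /* createParent */, (short) 1, 128 * 1024 * 1024L /* blockSize */,
        null /* progress */, 4096 /* buffersize */, null /* checksumOpt */,
        null /* favoredNodes */, "RS-6-3-64k" /* ecPolicyName */);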
 

[12/45] hadoop git commit: HDFS-11623. Move system erasure coding policies into hadoop-hdfs-client.

2017-04-17 Thread inigoiri
HDFS-11623. Move system erasure coding policies into hadoop-hdfs-client.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57e97ce8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57e97ce8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57e97ce8

Branch: refs/heads/HDFS-10467
Commit: 57e97ce8bdef01031e8b4db7078262895a0e1d48
Parents: 00826be
Author: Andrew Wang 
Authored: Fri Apr 7 16:46:28 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |   6 -
 .../protocol/SystemErasureCodingPolicies.java   | 121 +++
 .../org/apache/hadoop/test/TestHdfsHelper.java  |   5 +-
 .../namenode/ErasureCodingPolicyManager.java|  75 +---
 .../server/namenode/FSDirErasureCodingOp.java   |   6 +-
 .../server/namenode/FSImageFormatPBINode.java   |   3 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   8 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   5 +-
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |   6 +-
 .../TestDFSRSDefault10x4StripedInputStream.java |   7 +-
 ...TestDFSRSDefault10x4StripedOutputStream.java |   7 +-
 ...fault10x4StripedOutputStreamWithFailure.java |   7 +-
 .../hdfs/TestDFSXORStripedInputStream.java  |   7 +-
 .../hdfs/TestDFSXORStripedOutputStream.java |   7 +-
 ...estDFSXORStripedOutputStreamWithFailure.java |   7 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  26 ++--
 .../TestUnsetAndChangeDirectoryEcPolicy.java|  11 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/namenode/TestEnabledECPolicies.java  |  17 +--
 .../hdfs/server/namenode/TestFSImage.java   |  13 +-
 .../hdfs/server/namenode/TestINodeFile.java |   7 +-
 .../server/namenode/TestStripedINodeFile.java   |   8 +-
 .../TestOfflineImageViewer.java |  11 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  |   4 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |  10 +-
 25 files changed, 212 insertions(+), 176 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57e97ce8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index d2209a4..0d31bc4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -144,12 +144,6 @@ public final class HdfsConstants {
 ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
   }
 
-  public static final byte RS_6_3_POLICY_ID = 1;
-  public static final byte RS_3_2_POLICY_ID = 2;
-  public static final byte RS_6_3_LEGACY_POLICY_ID = 3;
-  public static final byte XOR_2_1_POLICY_ID = 4;
-  public static final byte RS_10_4_POLICY_ID = 5;
-
   /* Hidden constructor */
   protected HdfsConstants() {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57e97ce8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
new file mode 100644
index 000..2cd838b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 

[40/45] hadoop git commit: YARN-6433. Only accessible cgroup mount directories should be selected for a controller. (Miklos Szegedi via kasha)

2017-04-17 Thread inigoiri
YARN-6433. Only accessible cgroup mount directories should be selected for a 
controller. (Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/859b3a03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/859b3a03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/859b3a03

Branch: refs/heads/HDFS-10467
Commit: 859b3a03dce10ebc2c81e6422f3fe73b06fbfbe7
Parents: b326822
Author: Karthik Kambatla 
Authored: Fri Apr 14 15:07:14 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:03 2017 -0700

--
 .../containermanager/linux/resources/CGroupsHandlerImpl.java  | 7 ++-
 .../linux/resources/TestCGroupsHandlerImpl.java   | 5 +
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/859b3a03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 0b29abc..d5295c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -236,7 +236,12 @@ class CGroupsHandlerImpl implements CGroupsHandler {
  Map<String, Set<String>> entries) {
 for (Map.Entry<String, Set<String>> e : entries.entrySet()) {
   if (e.getValue().contains(controller)) {
-return e.getKey();
+if (new File(e.getKey()).canRead()) {
+  return e.getKey();
+} else {
+  LOG.warn(String.format(
+  "Skipping inaccessible cgroup mount point %s", e.getKey()));
+}
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/859b3a03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
index 38dc34f..4c0829e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
@@ -252,6 +252,10 @@ public class TestCGroupsHandlerImpl {
 String cpuMtabContent =
 "none " + parentDir.getAbsolutePath()
 + "/cpu cgroup rw,relatime,cpu 0 0\n";
+// Mark an empty directory called 'cp' cgroup. It is processed before 'cpu'
+String cpuMtabContentMissing =
+"none " + parentDir.getAbsolutePath()
++ "/cp cgroup rw,relatime,cpu 0 0\n";
 String blkioMtabContent =
 "none " + parentDir.getAbsolutePath()
 + "/blkio cgroup rw,relatime,blkio 0 0\n";
@@ -264,6 +268,7 @@ public class TestCGroupsHandlerImpl {
   }
 }
 FileWriter mtabWriter = new FileWriter(mockMtab.getAbsoluteFile());
+mtabWriter.write(cpuMtabContentMissing);
 mtabWriter.write(cpuMtabContent);
 mtabWriter.write(blkioMtabContent);
 mtabWriter.close();





[32/45] hadoop git commit: HDFS-11565. Use compact identifiers for built-in ECPolicies in HdfsFileStatus.

2017-04-17 Thread inigoiri
HDFS-11565. Use compact identifiers for built-in ECPolicies in HdfsFileStatus.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdb1cf40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdb1cf40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdb1cf40

Branch: refs/heads/HDFS-10467
Commit: fdb1cf400845f61f00ab14d228b02df37e7eb7ac
Parents: 7608d5c
Author: Andrew Wang 
Authored: Wed Apr 12 12:24:32 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:02 2017 -0700

--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 32 ++--
 .../src/main/proto/hdfs.proto   |  6 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 86 +++-
 3 files changed, 113 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdb1cf40/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 98d7ef9..e703a94 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
@@ -2641,20 +2642,37 @@ public class PBHelperClient {
   }
 
   public static ErasureCodingPolicy convertErasureCodingPolicy(
-  ErasureCodingPolicyProto policy) {
-return new ErasureCodingPolicy(policy.getName(),
-convertECSchema(policy.getSchema()),
-policy.getCellSize(), (byte) policy.getId());
+  ErasureCodingPolicyProto proto) {
+final byte id = (byte) (proto.getId() & 0xFF);
+ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID(id);
+if (policy == null) {
+  // If it's not a built-in policy, populate from the optional PB fields.
+  // The optional fields are required in this case.
+  Preconditions.checkArgument(proto.hasName(),
+  "Missing name field in ErasureCodingPolicy proto");
+  Preconditions.checkArgument(proto.hasSchema(),
+  "Missing schema field in ErasureCodingPolicy proto");
+  Preconditions.checkArgument(proto.hasCellSize(),
+  "Missing cellsize field in ErasureCodingPolicy proto");
+
+  return new ErasureCodingPolicy(proto.getName(),
+  convertECSchema(proto.getSchema()),
+  proto.getCellSize(), id);
+}
+return policy;
   }
 
   public static ErasureCodingPolicyProto convertErasureCodingPolicy(
   ErasureCodingPolicy policy) {
 ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
 .newBuilder()
-.setName(policy.getName())
-.setSchema(convertECSchema(policy.getSchema()))
-.setCellSize(policy.getCellSize())
 .setId(policy.getId());
+// If it's not a built-in policy, need to set the optional fields.
+if (SystemErasureCodingPolicies.getByID(policy.getId()) == null) {
+  builder.setName(policy.getName())
+  .setSchema(convertECSchema(policy.getSchema()))
+  .setCellSize(policy.getCellSize());
+}
 return builder.build();
   }
 
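The conversion above is the whole trick behind the compact identifiers: a built-in policy round-trips as a bare one-byte id, and only unknown policies must carry name, schema and cell size. A toy model of that scheme, with all names hypothetical rather than the HDFS API:

    import java.util.HashMap;
    import java.util.Map;

    final class CompactPolicyCodec {
      // Well-known policies, keyed by their one-byte wire id.
      private static final Map<Byte, String> BUILT_IN = new HashMap<>();
      static {
        BUILT_IN.put((byte) 1, "RS-6-3-64k");
        BUILT_IN.put((byte) 2, "XOR-2-1-64k");
      }

      static String decode(byte id, String fullDefinitionOrNull) {
        String builtIn = BUILT_IN.get(id);
        if (builtIn != null) {
          return builtIn;              // compact path: the id alone suffices
        }
        if (fullDefinitionOrNull == null) {
          throw new IllegalArgumentException(
              "non-built-in policy " + id + " requires a full definition");
        }
        return fullDefinitionOrNull;   // custom policy: full fields required
      }
    }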

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdb1cf40/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 99a9e68..3e3994c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -352,9 +352,9 @@ message ECSchemaProto {
 }
 
 message ErasureCodingPolicyProto {
-  required string name = 1;
-  required ECSchemaProto schema = 2;
-  required uint32 cellSize = 3;
+  optional string name = 1;
+  optional ECSchemaProto schema = 2;
+  optional uint32 cellSize = 3;

[27/45] hadoop git commit: HDFS-11558. BPServiceActor thread name is too long. Contributed by Xiaobing Zhou

2017-04-17 Thread inigoiri
HDFS-11558. BPServiceActor thread name is too long. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99ae9457
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99ae9457
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99ae9457

Branch: refs/heads/HDFS-10467
Commit: 99ae9457c9d6c08ddadf1990f30ca162f619cff0
Parents: 8b28622
Author: Mingliang Liu 
Authored: Tue Apr 11 15:28:59 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:01 2017 -0700

--
 .../hdfs/server/datanode/BPOfferService.java| 19 ---
 .../hdfs/server/datanode/BPServiceActor.java| 25 +++-
 .../hdfs/server/datanode/BlockPoolManager.java  |  8 ---
 .../server/datanode/TestBPOfferService.java |  2 +-
 .../server/datanode/TestBlockPoolManager.java   |  4 +++-
 5 files changed, 39 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99ae9457/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 00e6b3e..e0daca7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -70,7 +70,8 @@ class BPOfferService {
* handshake.
*/
   volatile DatanodeRegistration bpRegistration;
-  
+
+  private final String nameserviceId;
   private final DataNode dn;
 
   /**
@@ -120,12 +121,16 @@ class BPOfferService {
 mWriteLock.unlock();
   }
 
-  BPOfferService(List<InetSocketAddress> nnAddrs,
-      List<InetSocketAddress> lifelineNnAddrs, DataNode dn) {
+  BPOfferService(
+      final String nameserviceId,
+      List<InetSocketAddress> nnAddrs,
+      List<InetSocketAddress> lifelineNnAddrs,
+      DataNode dn) {
 Preconditions.checkArgument(!nnAddrs.isEmpty(),
 "Must pass at least one NN.");
 Preconditions.checkArgument(nnAddrs.size() == lifelineNnAddrs.size(),
 "Must pass same number of NN addresses and lifeline addresses.");
+this.nameserviceId = nameserviceId;
 this.dn = dn;
 
 for (int i = 0; i < nnAddrs.size(); ++i) {
@@ -170,6 +175,14 @@ class BPOfferService {
 return false;
   }
 
+  /**
+   * Gets nameservice id to which this {@link BPOfferService} maps to.
+   * @return nameservice id, which can be null.
+   */
+  String getNameserviceId() {
+return nameserviceId;
+  }
+
   String getBlockPoolId() {
 readLock();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99ae9457/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ddc28b7..21e2a3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -25,7 +25,6 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -279,7 +278,10 @@ class BPServiceActor implements Runnable {
 // This also initializes our block pool in the DN if we are
 // the first NN connection for this BP.
 bpos.verifyAndSetNamespaceInfo(this, nsInfo);
-
+
+/* set thread name again to include NamespaceInfo when it's available. */
+this.bpThread.setName(formatThreadName("heartbeating", nnAddr));
+
 // Second phase of the handshake with the NN.
 register(nsInfo);
   }
@@ -547,14 +549,15 @@ class BPServiceActor implements Runnable {
   lifelineSender.start();
 }
   }
-  
-  private String formatThreadName(String action, InetSocketAddress addr) {
-    Collection<StorageLocation> dataDirs =
-        DataNode.getStorageLocations(dn.getConf());
-    return "DataNode: [" + dataDirs.toString() + "]  " +
-        action + " to " + addr;
+
+  private String formatThreadName(
+  final String action,
+  final InetSocketAddress addr) {
+final String prefix = bpos.getBlockPoolId() != null ? 

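The shortened name is applied in two steps: a provisional name at thread creation, then a rename once the handshake supplies the block pool id. Illustrative Java only, not the BPServiceActor code:

    public class ThreadRenameSketch {
      public static void main(String[] args) throws InterruptedException {
        Thread heartbeat = new Thread(() -> { /* heartbeat loop */ });
        heartbeat.setName("heartbeating to nn1:8020");   // provisional name
        heartbeat.start();
        // once richer context (e.g. a block pool id) is known, rename in
        // place; setName is safe to call on a live thread
        heartbeat.setName("BP-1234 heartbeating to nn1:8020");
        heartbeat.join();
      }
    }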
[24/45] hadoop git commit: HADOOP-13665. Erasure Coding codec should support fallback coder. Contributed by Kai Sasaki.

2017-04-17 Thread inigoiri
HADOOP-13665. Erasure Coding codec should support fallback coder. Contributed 
by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28dde869
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28dde869
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28dde869

Branch: refs/heads/HDFS-10467
Commit: 28dde86984d5cb3764b552f315e87aeb89a8ecb9
Parents: ed66d9b
Author: Wei-Chiu Chuang 
Authored: Tue Apr 11 07:31:29 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:01 2017 -0700

--
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 135 +++
 .../src/main/resources/core-default.xml |  25 ++--
 .../erasurecode/TestCodecRawCoderMapping.java   |  71 +-
 .../coder/TestHHXORErasureCoder.java|   2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |   2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   2 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |   2 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |   2 +-
 .../TestUnsetAndChangeDirectoryEcPolicy.java|   2 +-
 10 files changed, 164 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28dde869/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
index 861451a..0c66df6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -18,6 +18,10 @@
 package org.apache.hadoop.io.erasurecode;
 
 import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.erasurecode.codec.ErasureCodec;
@@ -26,6 +30,8 @@ import org.apache.hadoop.io.erasurecode.codec.RSErasureCodec;
 import org.apache.hadoop.io.erasurecode.codec.XORErasureCodec;
 import org.apache.hadoop.io.erasurecode.coder.ErasureDecoder;
 import org.apache.hadoop.io.erasurecode.coder.ErasureEncoder;
+import 
org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
+import 
org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory;
 import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
 import 
org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
@@ -35,6 +41,7 @@ import 
org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
 
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.util.Map;
 
 /**
  * A codec & coder utility to help create coders conveniently.
@@ -49,41 +56,50 @@ import java.lang.reflect.InvocationTargetException;
 @InterfaceAudience.Private
 public final class CodecUtil {
 
+  private static final Log LOG = LogFactory.getLog(CodecUtil.class);
+
+  public static final String IO_ERASURECODE_CODEC = "io.erasurecode.codec.";
+
   /** Erasure coder XOR codec. */
   public static final String IO_ERASURECODE_CODEC_XOR_KEY =
-  "io.erasurecode.codec.xor";
+  IO_ERASURECODE_CODEC + "xor";
   public static final String IO_ERASURECODE_CODEC_XOR =
   XORErasureCodec.class.getCanonicalName();
   /** Erasure coder Reed-Solomon codec. */
   public static final String IO_ERASURECODE_CODEC_RS_KEY =
-  "io.erasurecode.codec.rs";
+  IO_ERASURECODE_CODEC + "rs";
   public static final String IO_ERASURECODE_CODEC_RS =
   RSErasureCodec.class.getCanonicalName();
   /** Erasure coder hitch hiker XOR codec. */
   public static final String IO_ERASURECODE_CODEC_HHXOR_KEY =
-  "io.erasurecode.codec.hhxor";
+  IO_ERASURECODE_CODEC + "hhxor";
   public static final String IO_ERASURECODE_CODEC_HHXOR =
   HHXORErasureCodec.class.getCanonicalName();
 
-  /** Supported erasure codec classes. */
-
-  /** Raw coder factory for the RS codec. */
-  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
-  "io.erasurecode.codec.rs.rawcoder";
-  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT =
-  RSRawErasureCoderFactory.class.getCanonicalName();
-
-  /** 

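The fallback mechanism boils down to trying an ordered list of coder factories and keeping the first one that can be instantiated, typically a native coder first and the pure-Java coder last. A generic sketch of that shape, using a hypothetical helper rather than the CodecUtil API:

    import java.util.List;
    import java.util.Optional;
    import java.util.function.Supplier;

    final class Fallbacks {
      // Return the first candidate that can actually be created; a failure
      // (e.g. a missing native library) just moves on to the next candidate.
      static <T> Optional<T> firstUsable(List<Supplier<T>> candidates) {
        for (Supplier<T> candidate : candidates) {
          try {
            T created = candidate.get();
            if (created != null) {
              return Optional.of(created);
            }
          } catch (RuntimeException e) {
            // instantiation failed; try the next factory in the chain
          }
        }
        return Optional.empty();
      }
    }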
[35/45] hadoop git commit: YARN-6450. TestContainerManagerWithLCE requires override for each new test added to ContainerManagerTest? Contributed by Jason Lowe.

2017-04-17 Thread inigoiri
YARN-6450. TestContainerManagerWithLCE requires override for each new test 
added to ContainerManagerTest? Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2d6656e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2d6656e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2d6656e

Branch: refs/heads/HDFS-10467
Commit: e2d6656e274c5c423532bd6995a294832e439ad8
Parents: d2f889b
Author: Eric Payne 
Authored: Wed Apr 12 10:20:08 2017 -0500
Committer: Inigo 
Committed: Mon Apr 17 11:17:02 2017 -0700

--
 .../TestContainerManagerWithLCE.java| 310 +--
 1 file changed, 3 insertions(+), 307 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2d6656e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
index 5dc4717..028db6a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
 import org.junit.After;
+import org.junit.Assume;
 
 public class TestContainerManagerWithLCE extends TestContainerManager {
 
@@ -51,11 +52,8 @@ public class TestContainerManagerWithLCE extends 
TestContainerManager {
 
   @Override
   public void setup() throws IOException {
-// Don't run the test if the binary is not available.
-if (!shouldRunTest()) {
-  LOG.info("LCE binary path is not passed. Not running the test");
-  return;
-}
+Assume.assumeTrue("LCE binary path is not passed. Not running the test",
+shouldRunTest());
 super.setup();
 localFS.setPermission(new Path(localDir.getCanonicalPath()),
 new FsPermission(
@@ -73,308 +71,6 @@ public class TestContainerManagerWithLCE extends 
TestContainerManager {
 }
   }
 
-  @Override
-  public void testContainerSetup() throws Exception, InterruptedException,
-  YarnException {
-// Don't run the test if the binary is not available.
-if (!shouldRunTest()) {
-  LOG.info("LCE binary path is not passed. Not running the test");
-  return;
-}
-LOG.info("Running testContainerSetup");
-super.testContainerSetup();
-  }
-
-  @Override
-  public void testContainerManagerInitialization() throws IOException {
-// Don't run the test if the binary is not available.
-if (!shouldRunTest()) {
-  LOG.info("LCE binary path is not passed. Not running the test");
-  return;
-}
-LOG.info("Running testContainerManagerInitialization");
-super.testContainerManagerInitialization();
-  }
-
-  @Override
-  public void testContainerLaunchAndStop() throws IOException,
-  InterruptedException, YarnException {
-// Don't run the test if the binary is not available.
-if (!shouldRunTest()) {
-  LOG.info("LCE binary path is not passed. Not running the test");
-  return;
-}
-LOG.info("Running testContainerLaunchAndStop");
-super.testContainerLaunchAndStop();
-  }
-  
-  @Override
-  public void testContainerLaunchAndExitSuccess() throws IOException,
-  InterruptedException, YarnException {
-// Don't run the test if the binary is not available.
-if (!shouldRunTest()) {
-  LOG.info("LCE binary path is not passed. Not running the test");
-  return;
-}
-LOG.info("Running testContainerLaunchAndExitSuccess");
-super.testContainerLaunchAndExitSuccess();
-  }
-
-  @Override
-  public void testContainerLaunchAndExitFailure() throws IOException,
-  InterruptedException, YarnException {
-// Don't run the test if the binary is not available.
-if (!shouldRunTest()) {
-  LOG.info("LCE binary path is not passed. Not running the test");
-  return;
-}
-LOG.info("Running testContainerLaunchAndExitFailure");
-super.testContainerLaunchAndExitFailure();
-  

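The hundreds of deleted lines were all variations of the same guard, which JUnit's Assume now expresses once in setup. A minimal JUnit 4 sketch of the pattern (the system property name is made up for illustration):

    import static org.junit.Assume.assumeTrue;

    import org.junit.Before;
    import org.junit.Test;

    public class NativeBinaryDependentTest {
      @Before
      public void setup() {
        // a failed assumption marks every test in the class as skipped,
        // instead of letting each test return early and report a pass
        assumeTrue("native binary path not set; skipping tests",
            System.getProperty("native.binary.path") != null);
      }

      @Test
      public void testNeedsNativeBinary() {
        // runs only when the assumption in setup() held
      }
    }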
[22/45] hadoop git commit: YARN-6461. TestRMAdminCLI has very low test timeouts. Contributed by Eric Badger

2017-04-17 Thread inigoiri
YARN-6461. TestRMAdminCLI has very low test timeouts. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed66d9ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed66d9ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed66d9ba

Branch: refs/heads/HDFS-10467
Commit: ed66d9ba695efc584e15e861366657b7a70f913b
Parents: 5fcdc25
Author: Jason Lowe 
Authored: Tue Apr 11 09:16:11 2017 -0500
Committer: Inigo 
Committed: Mon Apr 17 11:17:01 2017 -0700

--
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  | 32 ++--
 1 file changed, 16 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed66d9ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index a2f5330..013c227 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -187,14 +187,14 @@ public class TestRMAdminCLI {
 dummyNodeLabelsManager.init(conf);
   }
   
-  @Test(timeout=500)
+  @Test
   public void testRefreshQueues() throws Exception {
 String[] args = { "-refreshQueues" };
 assertEquals(0, rmAdminCLI.run(args));
 verify(admin).refreshQueues(any(RefreshQueuesRequest.class));
   }
 
-  @Test(timeout=500)
+  @Test
   public void testRefreshUserToGroupsMappings() throws Exception {
 String[] args = { "-refreshUserToGroupsMappings" };
 assertEquals(0, rmAdminCLI.run(args));
@@ -202,7 +202,7 @@ public class TestRMAdminCLI {
 any(RefreshUserToGroupsMappingsRequest.class));
   }
 
-  @Test(timeout=500)
+  @Test
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
 String[] args = { "-refreshSuperUserGroupsConfiguration" };
 assertEquals(0, rmAdminCLI.run(args));
@@ -210,14 +210,14 @@ public class TestRMAdminCLI {
 any(RefreshSuperUserGroupsConfigurationRequest.class));
   }
 
-  @Test(timeout=500)
+  @Test
   public void testRefreshAdminAcls() throws Exception {
 String[] args = { "-refreshAdminAcls" };
 assertEquals(0, rmAdminCLI.run(args));
 verify(admin).refreshAdminAcls(any(RefreshAdminAclsRequest.class));
   }
 
-  @Test(timeout = 5000)
+  @Test
   public void testRefreshClusterMaxPriority() throws Exception {
 String[] args = { "-refreshClusterMaxPriority" };
 assertEquals(0, rmAdminCLI.run(args));
@@ -225,14 +225,14 @@ public class TestRMAdminCLI {
 any(RefreshClusterMaxPriorityRequest.class));
   }
 
-  @Test(timeout=500)
+  @Test
   public void testRefreshServiceAcl() throws Exception {
 String[] args = { "-refreshServiceAcl" };
 assertEquals(0, rmAdminCLI.run(args));
 verify(admin).refreshServiceAcls(any(RefreshServiceAclsRequest.class));
   }
 
-  @Test(timeout=500)
+  @Test
   public void testUpdateNodeResource() throws Exception {
 String nodeIdStr = "0.0.0.0:0";
 int memSize = 2048;
@@ -256,7 +256,7 @@ public class TestRMAdminCLI {
 resource);
   }
 
-  @Test(timeout=500)
+  @Test
   public void testUpdateNodeResourceWithInvalidValue() throws Exception {
 String nodeIdStr = "0.0.0.0:0";
 int memSize = -2048;
@@ -270,7 +270,7 @@ public class TestRMAdminCLI {
 any(UpdateNodeResourceRequest.class));
   }
 
-  @Test(timeout=500)
+  @Test
   public void testRefreshNodes() throws Exception {
 String[] args = { "-refreshNodes" };
 assertEquals(0, rmAdminCLI.run(args));
@@ -373,7 +373,7 @@ public class TestRMAdminCLI {
 assertEquals(-1, rmAdminCLI.run(invalidTrackingArgs));
   }
 
-  @Test(timeout=500)
+  @Test
   public void testGetGroups() throws Exception {
 when(admin.getGroupsForUser(eq("admin"))).thenReturn(
 new String[] {"group1", "group2"});
@@ -395,7 +395,7 @@ public class TestRMAdminCLI {
 }
   }
 
-  @Test(timeout = 500)
+  @Test
   public void testTransitionToActive() throws Exception {
 String[] args = {"-transitionToActive", "rm1"};
 
@@ -414,7 +414,7 @@ public class TestRMAdminCLI {
 verify(haadmin, times(1)).getServiceStatus();
   }
 
-  @Test(timeout = 500)
+  @Test
   public void testTransitionToStandby() throws Exception {
 String[] args = {"-transitionToStandby", "rm1"};
 
@@ -431,7 +431,7 @@ public class TestRMAdminCLI {
  

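Per-test timeouts of 500 ms fail spuriously on slow or loaded machines, which is why they are dropped above. If a hang guard is still wanted, one option (illustrative, not part of this patch) is a single generous class-wide rule:

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.Timeout;

    public class TestWithGlobalTimeout {
      @Rule
      public Timeout globalTimeout = Timeout.seconds(90);  // every test in the class

      @Test
      public void testSomething() {
        // fails only if it runs longer than 90 seconds
      }
    }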
[11/45] hadoop git commit: YARN-6298. Metric preemptCall is not used in new preemption. (Yufei Gu via kasha)

2017-04-17 Thread inigoiri
YARN-6298. Metric preemptCall is not used in new preemption. (Yufei Gu via 
kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b096340c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b096340c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b096340c

Branch: refs/heads/HDFS-10467
Commit: b096340cee2bc728cb8d1c948bd4f2a99aca0b2a
Parents: 57e97ce
Author: Karthik Kambatla 
Authored: Fri Apr 7 17:31:50 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 .../server/resourcemanager/scheduler/fair/FSOpDurations.java | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b096340c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
index f6d843a..c841ca7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSOpDurations.java
@@ -50,9 +50,6 @@ public class FSOpDurations implements MetricsSource {
   @Metric("Duration for a update thread run")
   MutableRate updateThreadRun;
 
-  @Metric("Duration for a preempt call")
-  MutableRate preemptCall;
-
   private static final MetricsInfo RECORD_INFO =
   info("FSOpDurations", "Durations of FairScheduler calls or thread-runs");
 
@@ -84,7 +81,6 @@ public class FSOpDurations implements MetricsSource {
 continuousSchedulingRun.setExtended(isExtended);
 nodeUpdateCall.setExtended(isExtended);
 updateThreadRun.setExtended(isExtended);
-preemptCall.setExtended(isExtended);
 
 INSTANCE.isExtended = isExtended;
   }
@@ -106,10 +102,6 @@ public class FSOpDurations implements MetricsSource {
 updateThreadRun.add(value);
   }
 
-  public void addPreemptCallDuration(long value) {
-preemptCall.add(value);
-  }
-
   @VisibleForTesting
   public boolean hasUpdateThreadRunChanged() {
 return updateThreadRun.changed();





[23/45] hadoop git commit: YARN-6195. Export UsedCapacity and AbsoluteUsedCapacity to JMX. Contributed by Benson Qiu

2017-04-17 Thread inigoiri
YARN-6195. Export UsedCapacity and AbsoluteUsedCapacity to JMX. Contributed by 
Benson Qiu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fcdc25b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fcdc25b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fcdc25b

Branch: refs/heads/HDFS-10467
Commit: 5fcdc25b90cc7ffd19e44dfb9150cce154bc8889
Parents: 3eeac57
Author: Jason Lowe 
Authored: Tue Apr 11 08:44:18 2017 -0500
Committer: Inigo 
Committed: Mon Apr 17 11:17:01 2017 -0700

--
 .../hadoop/metrics2/lib/MetricsRegistry.java| 26 +++
 .../hadoop/metrics2/lib/MutableGaugeFloat.java  | 80 
 .../metrics2/lib/MutableMetricsFactory.java |  3 +
 .../metrics2/lib/TestMetricsAnnotations.java|  3 +
 .../metrics2/lib/TestMetricsRegistry.java   |  4 +-
 .../hadoop/metrics2/lib/TestMutableMetrics.java |  2 +
 .../scheduler/capacity/AbstractCSQueue.java | 20 ++---
 .../scheduler/capacity/CSQueue.java | 14 
 .../scheduler/capacity/CSQueueMetrics.java  | 21 +
 .../scheduler/capacity/CSQueueUtils.java| 32 +---
 .../scheduler/capacity/LeafQueue.java   |  2 +-
 .../scheduler/capacity/ParentQueue.java |  2 +-
 .../scheduler/capacity/ReservationQueue.java|  2 +-
 13 files changed, 168 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fcdc25b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
index 0af45a6..7070869 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsRegistry.java
@@ -143,6 +143,7 @@ public class MetricsRegistry {
   public MutableGaugeInt newGauge(String name, String desc, int iVal) {
 return newGauge(Interns.info(name, desc), iVal);
   }
+
   /**
* Create a mutable integer gauge
* @param info  metadata of the metric
@@ -181,6 +182,30 @@ public class MetricsRegistry {
   }
 
   /**
+   * Create a mutable float gauge
+   * @param name  of the metric
+   * @param desc  metric description
+   * @param iVal  initial value
+   * @return a new gauge object
+   */
+  public MutableGaugeFloat newGauge(String name, String desc, float iVal) {
+return newGauge(Interns.info(name, desc), iVal);
+  }
+
+  /**
+   * Create a mutable float gauge
+   * @param info  metadata of the metric
+   * @param iVal  initial value
+   * @return a new gauge object
+   */
+  public synchronized MutableGaugeFloat newGauge(MetricsInfo info, float iVal) 
{
+checkMetricName(info.name());
+MutableGaugeFloat ret = new MutableGaugeFloat(info, iVal);
+metricsMap.put(info.name(), ret);
+return ret;
+  }
+
+  /**
* Create a mutable metric that estimates quantiles of a stream of values
* @param name of the metric
* @param desc metric description
@@ -420,4 +445,5 @@ public class MetricsRegistry {
 .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics())
 .toString();
   }
+
 }

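For downstream metric sources, the new overloads make float-valued gauges as easy to register as the existing int/long ones. Hypothetical usage (registry and metric names are illustrative):

    import org.apache.hadoop.metrics2.lib.MetricsRegistry;
    import org.apache.hadoop.metrics2.lib.MutableGaugeFloat;

    public class FloatGaugeSketch {
      public static void main(String[] args) {
        MetricsRegistry registry = new MetricsRegistry("QueueMetrics");
        MutableGaugeFloat usedCapacity =
            registry.newGauge("UsedCapacity", "used capacity as a fraction", 0f);
        usedCapacity.set(0.42f);  // exported (e.g. over JMX) like other gauges
      }
    }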
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fcdc25b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
new file mode 100644
index 0000000..b16eda2
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeFloat.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 

[13/45] hadoop git commit: HADOOP-14008. Upgrade to Apache Yetus 0.4.0

2017-04-17 Thread inigoiri
HADOOP-14008. Upgrade to Apache Yetus 0.4.0

Signed-off-by: Andrew Wang 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1c3ece8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1c3ece8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1c3ece8

Branch: refs/heads/HDFS-10467
Commit: f1c3ece890b6d3a9c4d2a5cec6ccb89a098bef19
Parents: b096340
Author: Allen Wittenauer 
Authored: Fri Apr 7 09:34:28 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 dev-support/bin/yetus-wrapper | 44 --
 1 file changed, 23 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1c3ece8/dev-support/bin/yetus-wrapper
--
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index ddcc7a5..9f6bb33 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -14,6 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# you must be this high to ride the ride
+if [[ -z "${BASH_VERSINFO[0]}" ]] \
+   || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \
+   || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then
+  echo "bash v3.2+ is required. Sorry."
+  exit 1
+fi
+
 set -o pipefail
 
 ## @description  Print a message to stderr
@@ -39,6 +47,7 @@ function yetus_abs
   declare obj=$1
   declare dir
   declare fn
+  declare dirret
 
   if [[ ! -e ${obj} ]]; then
 return 1
@@ -51,7 +60,8 @@ function yetus_abs
   fi
 
   dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
-  if [[ $? = 0 ]]; then
+  dirret=$?
+  if [[ ${dirret} = 0 ]]; then
 echo "${dir}${fn}"
 return 0
   fi
@@ -63,7 +73,7 @@ WANTED="$1"
 shift
 ARGV=("$@")
 
-HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.3.0}
+HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.4.0}
 BIN=$(yetus_abs "${BASH_SOURCE-$0}")
 BINDIR=$(dirname "${BIN}")
 
@@ -85,7 +95,8 @@ if [[ ! -d "${HADOOP_PATCHPROCESS}" ]]; then
 fi
 
 mytmpdir=$(yetus_abs "${HADOOP_PATCHPROCESS}")
-if [[ $? != 0 ]]; then
+ret=$?
+if [[ ${ret} != 0 ]]; then
   yetus_error "yetus-dl: Unable to cwd to ${HADOOP_PATCHPROCESS}"
   exit 1
 fi
@@ -108,15 +119,13 @@ TARBALL="yetus-${HADOOP_YETUS_VERSION}-bin.tar"
 GPGBIN=$(command -v gpg)
 CURLBIN=$(command -v curl)
 
-pushd "${HADOOP_PATCHPROCESS}" >/dev/null
-if [[ $? != 0 ]]; then
+if ! pushd "${HADOOP_PATCHPROCESS}" >/dev/null; then
   yetus_error "ERROR: yetus-dl: Cannot pushd to ${HADOOP_PATCHPROCESS}"
   exit 1
 fi
 
 if [[ -n "${CURLBIN}" ]]; then
-  "${CURLBIN}" -f -s -L -O "${BASEURL}/${TARBALL}.gz"
-  if [[ $? != 0 ]]; then
+  if ! "${CURLBIN}" -f -s -L -O "${BASEURL}/${TARBALL}.gz"; then
 yetus_error "ERROR: yetus-dl: unable to download ${BASEURL}/${TARBALL}.gz"
 exit 1
   fi
@@ -126,40 +135,33 @@ else
 fi
 
 if [[ -n "${GPGBIN}" ]]; then
-  mkdir -p .gpg
-  if [[ $? != 0 ]]; then
+  if ! mkdir -p .gpg; then
 yetus_error "ERROR: yetus-dl: Unable to create ${HADOOP_PATCHPROCESS}/.gpg"
 exit 1
   fi
-  chmod -R 700 .gpg
-  if [[ $? != 0 ]]; then
+  if ! chmod -R 700 .gpg; then
 yetus_error "ERROR: yetus-dl: Unable to chmod ${HADOOP_PATCHPROCESS}/.gpg"
 exit 1
   fi
-  "${CURLBIN}" -s -L -o KEYS_YETUS 
https://dist.apache.org/repos/dist/release/yetus/KEYS
-  if [[ $? != 0 ]]; then
+  if ! "${CURLBIN}" -s -L -o KEYS_YETUS 
https://dist.apache.org/repos/dist/release/yetus/KEYS; then
 yetus_error "ERROR: yetus-dl: unable to fetch 
https://dist.apache.org/repos/dist/release/yetus/KEYS;
 exit 1
   fi
-  "${CURLBIN}" -s -L -O "${BASEURL}/${TARBALL}.gz.asc"
-  if [[ $? != 0 ]]; then
+  if ! "${CURLBIN}" -s -L -O "${BASEURL}/${TARBALL}.gz.asc"; then
 yetus_error "ERROR: yetus-dl: unable to fetch ${BASEURL}/${TARBALL}.gz.asc"
 exit 1
   fi
-  "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --import 
"${HADOOP_PATCHPROCESS}/KEYS_YETUS" >/dev/null 2>&1
-  if [[ $? != 0 ]]; then
+  if ! "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --import 
"${HADOOP_PATCHPROCESS}/KEYS_YETUS" >/dev/null 2>&1; then
 yetus_error "ERROR: yetus-dl: gpg unable to import 
${HADOOP_PATCHPROCESS}/KEYS_YETUS"
 exit 1
   fi
-  "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --verify 
"${TARBALL}.gz.asc" >/dev/null 2>&1
-   if [[ $? != 0 ]]; then
+  if ! "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --verify 
"${TARBALL}.gz.asc" >/dev/null 2>&1; then
  yetus_error "ERROR: yetus-dl: gpg verify of tarball in 
${HADOOP_PATCHPROCESS} failed"
  exit 1
fi
 fi
 
-gunzip -c "${TARBALL}.gz" | tar xpf -
-if [[ $? != 0 ]]; then
+if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
   

[08/45] hadoop git commit: HDFS-11633. FSImage failover disables all erasure coding policies. Contributed by Wei-Chiu Chuang.

2017-04-17 Thread inigoiri
HDFS-11633. FSImage failover disables all erasure coding policies. Contributed 
by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00826be9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00826be9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00826be9

Branch: refs/heads/HDFS-10467
Commit: 00826be98f92c7a821d55339a5833d27fe3ffcc6
Parents: 1f1f8c6
Author: Andrew Wang 
Authored: Fri Apr 7 15:46:11 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 .../namenode/ErasureCodingPolicyManager.java|  3 +-
 .../hdfs/server/namenode/TestStartup.java   | 57 +++-
 2 files changed, 58 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00826be9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index c23b034..17b48f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -190,6 +190,7 @@ public final class ErasureCodingPolicyManager {
* Clear and clean up.
*/
   public void clear() {
-enabledPoliciesByName.clear();
+// TODO: we should only clear policies loaded from NN metadata.
+// This is a placeholder for HDFS-7337.
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00826be9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 5da19a7..8c2acf6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -41,6 +41,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -48,10 +49,13 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -564,7 +568,58 @@ public class TestStartup {
 } finally {
   cluster.shutdown();
 }
-}
+  }
+
+  @Test(timeout=30000)
+  public void testCorruptImageFallbackLostECPolicy() throws IOException {
+final ErasureCodingPolicy defaultPolicy = StripedFileTestUtil
+.getDefaultECPolicy();
+final String policy = defaultPolicy.getName();
+final Path f1 = new Path("/f1");
+config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policy);
+
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
+.numDataNodes(0)
+.format(true)
+.build();
+try {
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  // set root directory to use the default ec policy
+  Path srcECDir = new Path("/");
+  fs.setErasureCodingPolicy(srcECDir,
+  defaultPolicy.getName());
+
+  // create a file which will use the default ec policy
+  fs.create(f1);
+  FileStatus fs1 = fs.getFileStatus(f1);
+  assertTrue(fs1.isErasureCoded());
+  ErasureCodingPolicy fs1Policy = fs.getErasureCodingPolicy(f1);
+  

[15/45] hadoop git commit: HADOOP-14066 VersionInfo should be marked as public API

2017-04-17 Thread inigoiri
HADOOP-14066 VersionInfo should be marked as public API


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/616c8c69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/616c8c69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/616c8c69

Branch: refs/heads/HDFS-10467
Commit: 616c8c692316e48c16b4c28e50bfa63c5e58696e
Parents: 7ea8104
Author: Steve Loughran 
Authored: Sat Apr 8 13:41:10 2017 +0100
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 .../org/apache/hadoop/util/VersionInfo.java | 27 +++-
 1 file changed, 15 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/616c8c69/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
index dc8d369..10e2590 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.io.IOUtils;
 /**
  * This class returns build information about Hadoop components.
  */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class VersionInfo {
   private static final Log LOG = LogFactory.getLog(VersionInfo.class);
 
@@ -102,8 +102,8 @@ public class VersionInfo {
   }
   
   /**
-   * Get the subversion revision number for the root directory
-   * @return the revision number, eg. "451451"
+   * Get the Git commit hash of the repository when compiled.
+   * @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"
*/
   public static String getRevision() {
 return COMMON_VERSION_INFO._getRevision();
@@ -124,7 +124,7 @@ public class VersionInfo {
   public static String getDate() {
 return COMMON_VERSION_INFO._getDate();
   }
-  
+
   /**
* The user that compiled Hadoop.
* @return the username of the user
@@ -132,25 +132,27 @@ public class VersionInfo {
   public static String getUser() {
 return COMMON_VERSION_INFO._getUser();
   }
-  
+
   /**
-   * Get the subversion URL for the root Hadoop directory.
+   * Get the URL for the Hadoop repository.
+   * @return the URL of the Hadoop repository
*/
   public static String getUrl() {
 return COMMON_VERSION_INFO._getUrl();
   }
 
   /**
-   * Get the checksum of the source files from which Hadoop was
-   * built.
-   **/
+   * Get the checksum of the source files from which Hadoop was built.
+   * @return the checksum of the source files
+   */
   public static String getSrcChecksum() {
 return COMMON_VERSION_INFO._getSrcChecksum();
   }
 
   /**
-   * Returns the buildVersion which includes version, 
-   * revision, user and date. 
+   * Returns the buildVersion which includes version,
+   * revision, user and date.
+   * @return the buildVersion
*/
   public static String getBuildVersion(){
 return COMMON_VERSION_INFO._getBuildVersion();
@@ -158,6 +160,7 @@ public class VersionInfo {
 
   /**
* Returns the protoc version used for the build.
+   * @return the protoc version
*/
   public static String getProtocVersion(){
 return COMMON_VERSION_INFO._getProtocVersion();

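With the audience widened to Public/Stable, downstream projects can rely on these accessors directly; for example:

    import org.apache.hadoop.util.VersionInfo;

    public class ShowHadoopVersion {
      public static void main(String[] args) {
        // the accessors used here are the ones documented above
        System.out.println("build:    " + VersionInfo.getBuildVersion());
        System.out.println("revision: " + VersionInfo.getRevision());
        System.out.println("built by: " + VersionInfo.getUser()
            + " on " + VersionInfo.getDate());
      }
    }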




[18/45] hadoop git commit: YARN-6344. Add parameter for rack locality delay in CapacityScheduler. (kkaranasos)

2017-04-17 Thread inigoiri
YARN-6344. Add parameter for rack locality delay in CapacityScheduler. 
(kkaranasos)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7288294
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7288294
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7288294

Branch: refs/heads/HDFS-10467
Commit: a7288294f56bcc71b9a8f0d377bfdee3bf555d10
Parents: 908cf41
Author: Konstantinos Karanasos 
Authored: Mon Apr 10 15:25:33 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:17:00 2017 -0700

--
 .../conf/capacity-scheduler.xml |  24 ++-
 .../scheduler/SchedulerApplicationAttempt.java  |   5 +
 .../CapacitySchedulerConfiguration.java |  12 ++
 .../scheduler/capacity/LeafQueue.java   |  16 +-
 .../allocator/RegularContainerAllocator.java|  41 +++--
 .../scheduler/capacity/TestLeafQueue.java   | 159 ++-
 6 files changed, 235 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7288294/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
index 47db01f..785ed04 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
@@ -111,9 +111,27 @@
     <value>40</value>
     <description>
       Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers. 
-      Typically this should be set to number of nodes in the cluster, By default is setting 
-      approximately number of nodes in one rack which is 40.
+      attempts to schedule rack-local containers.
+      When setting this parameter, the size of the cluster should be taken into account.
+      We use 40 as the default value, which is approximately the number of nodes in one rack.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.rack-locality-additional-delay</name>
+    <value>-1</value>
+    <description>
+      Number of additional missed scheduling opportunities over the node-locality-delay
+      ones, after which the CapacityScheduler attempts to schedule off-switch containers,
+      instead of rack-local ones.
+      Example: with node-locality-delay=40 and rack-locality-delay=20, the scheduler will
+      attempt rack-local assignments after 40 missed opportunities, and off-switch assignments
+      after 40+20=60 missed opportunities.
+      When setting this parameter, the size of the cluster should be taken into account.
+      We use -1 as the default value, which disables this feature. In this case, the number
+      of missed opportunities for assigning off-switch containers is calculated based on
+      the number of containers and unique locations specified in the resource request,
+      as well as the size of the cluster.
     </description>
   </property>
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7288294/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 91e29d5..294897f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1304,6 +1304,11 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 return appSchedulingInfo.getSchedulingPlacementSet(schedulerRequestKey);
   }
 
+  public Map<String, ResourceRequest> getResourceRequests(
+  SchedulerRequestKey schedulerRequestKey) {
+return appSchedulingInfo.getSchedulingPlacementSet(schedulerRequestKey)
+

[06/45] hadoop git commit: HADOOP-14293. Initialize FakeTimer with a less trivial value.

2017-04-17 Thread inigoiri
HADOOP-14293. Initialize FakeTimer with a less trivial value.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e2d9a49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e2d9a49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e2d9a49

Branch: refs/heads/HDFS-10467
Commit: 3e2d9a498e518a29a6a8b33697afd5abdde49a02
Parents: 954f7f5
Author: Andrew Wang 
Authored: Mon Apr 10 11:37:01 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 .../src/test/java/org/apache/hadoop/util/FakeTimer.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e2d9a49/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
index 2b5f850..1b17ce7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
@@ -33,7 +33,8 @@ public class FakeTimer extends Timer {
 
   /** Constructs a FakeTimer with a non-zero value */
   public FakeTimer() {
-nowNanos = 1000;  // Initialize with a non-trivial value.
+// Initialize with a non-trivial value.
+nowNanos = TimeUnit.MILLISECONDS.toNanos(1000);
   }
 
   @Override


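Why the seed value matters, in two lines: 1000 raw nanoseconds rounds down to 0 ms and can hide unit-conversion bugs, while 1000 ms worth of nanoseconds is non-trivial on both clocks.

    import java.util.concurrent.TimeUnit;

    public class TimerSeedDemo {
      public static void main(String[] args) {
        System.out.println(TimeUnit.NANOSECONDS.toMillis(1000L));  // prints 0
        System.out.println(TimeUnit.NANOSECONDS.toMillis(
            TimeUnit.MILLISECONDS.toNanos(1000L)));                // prints 1000
      }
    }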



[05/45] hadoop git commit: YARN-6258. localBaseAddress for CORS proxy configuration is not working when suffixed with forward slash in new YARN UI. Contributed by Gergely Novák.

2017-04-17 Thread inigoiri
YARN-6258. localBaseAddress for CORS proxy configuration is not working when 
suffixed with forward slash in new YARN UI. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e51fb985
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e51fb985
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e51fb985

Branch: refs/heads/HDFS-10467
Commit: e51fb98541d0d331a743ab52d794c32a4c641e12
Parents: 0e4661f
Author: Sunil G 
Authored: Fri Apr 7 11:42:50 2017 +0530
Committer: Inigo 
Committed: Mon Apr 17 11:16:58 2017 -0700

--
 .../hadoop-yarn-ui/src/main/webapp/app/services/hosts.js   | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51fb985/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
index 19863e1..807844e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
@@ -61,7 +61,11 @@ export default Ember.Service.extend({
   },
 
   localBaseAddress: Ember.computed(function () {
-return this.localAddress();
+var url = this.localAddress();
+if (url.endsWith('/')) {
+  url = url.slice(0, -1);
+}
+return url;
   }),
 
   timelineWebAddress: Ember.computed(function () {

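The same normalization in Java terms, purely for illustration: strip at most one trailing slash so later concatenation of path segments cannot produce "//".

    final class Urls {
      // mirror of the Ember fix above: "http://host:8088/" -> "http://host:8088"
      static String stripTrailingSlash(String url) {
        return url.endsWith("/") ? url.substring(0, url.length() - 1) : url;
      }
    }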




[14/45] hadoop git commit: HADOOP-14290. Update SLF4J from 1.7.10 to 1.7.25.

2017-04-17 Thread inigoiri
HADOOP-14290. Update SLF4J from 1.7.10 to 1.7.25.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77880cca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77880cca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77880cca

Branch: refs/heads/HDFS-10467
Commit: 77880cca01e1a8deb03d671a3e11f2f22e25c901
Parents: 616c8c6
Author: Akira Ajisaka 
Authored: Sun Apr 9 01:46:01 2017 +0900
Committer: Inigo 
Committed: Mon Apr 17 11:16:59 2017 -0700

--
 LICENSE.txt| 6 +++---
 hadoop-project/pom.xml | 9 ++---
 2 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77880cca/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 52da57a..a7e43c4 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -631,9 +631,9 @@ Azure Data Lake Store - Java client SDK 2.0.11
 JCodings 1.0.8
 Joni 2.1.2
 Mockito 1.8.5
-JUL to SLF4J bridge 1.7.10
-SLF4J API Module 1.7.10
-SLF4J LOG4J-12 Binding 1.7.10
+JUL to SLF4J bridge 1.7.25
+SLF4J API Module 1.7.25
+SLF4J LOG4J-12 Binding 1.7.25
 

 
 The MIT License (MIT)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77880cca/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index bf93f0f..dec2b06 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -74,6 +74,9 @@
 1.9.13
 2.7.8
 
+
+1.7.25
+
 
 1.0
 
@@ -853,17 +856,17 @@
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-api</artifactId>
-        <version>1.7.10</version>
+        <version>${slf4j.version}</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-log4j12</artifactId>
-        <version>1.7.10</version>
+        <version>${slf4j.version}</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>jul-to-slf4j</artifactId>
-        <version>1.7.10</version>
+        <version>${slf4j.version}</version>
       </dependency>
       <dependency>
         <groupId>org.eclipse.jdt</groupId>





[02/45] hadoop git commit: HADOOP-14287. Compiling trunk with -DskipShade fails. Contributed by Arun Suresh.

2017-04-17 Thread inigoiri
HADOOP-14287. Compiling trunk with -DskipShade fails. Contributed by Arun 
Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b093621
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b093621
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b093621

Branch: refs/heads/HDFS-10467
Commit: 7b093621ec401a1311e9701cfc6b52d034635728
Parents: 224295f
Author: Andrew Wang 
Authored: Fri Apr 7 13:43:06 2017 -0700
Committer: Inigo 
Committed: Mon Apr 17 11:16:58 2017 -0700

--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b093621/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml 
b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
index 02e5824..99fd0c2 100644
--- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
+++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
@@ -147,6 +147,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
       <type>test-jar</type>





[39/50] [abbrv] hadoop git commit: YARN-6288. Exceptions during aggregated log writes are mishandled. Contributed by Akira Ajisaka

2017-04-17 Thread inigoiri
YARN-6288. Exceptions during aggregated log writes are mishandled. Contributed 
by Akira Ajisaka


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c11900e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c11900e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c11900e

Branch: refs/heads/HDFS-10467
Commit: 8c11900e83e29c05caa9daa804d59c8fa4619e37
Parents: 52c0ac5
Author: Jason Lowe 
Authored: Thu Apr 6 16:24:36 2017 -0500
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../hadoop/yarn/client/cli/TestLogsCLI.java |  58 -
 .../logaggregation/AggregatedLogFormat.java |  30 +++--
 .../logaggregation/TestAggregatedLogFormat.java | 123 ++-
 .../logaggregation/TestAggregatedLogsBlock.java |  25 ++--
 .../logaggregation/TestContainerLogsUtils.java  |  15 +--
 .../logaggregation/AppLogAggregatorImpl.java|  70 +--
 .../TestAppLogAggregatorImpl.java   |   8 +-
 7 files changed, 169 insertions(+), 160 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c11900e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 05993d5..37c859c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -1345,18 +1345,18 @@ public class TestLogsCLI {
 Path path =
 new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
 + System.currentTimeMillis());
-AggregatedLogFormat.LogWriter writer =
-new AggregatedLogFormat.LogWriter(configuration, path, ugi);
-writer.writeApplicationOwner(ugi.getUserName());
-
-Map<ApplicationAccessType, String> appAcls =
-new HashMap<ApplicationAccessType, String>();
-appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
-writer.writeApplicationACLs(appAcls);
-writer.append(new AggregatedLogFormat.LogKey(containerId),
-  new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
-UserGroupInformation.getCurrentUser().getShortUserName()));
-writer.close();
+try (AggregatedLogFormat.LogWriter writer =
+ new AggregatedLogFormat.LogWriter()) {
+  writer.initialize(configuration, path, ugi);
+  writer.writeApplicationOwner(ugi.getUserName());
+
+  Map<ApplicationAccessType, String> appAcls = new HashMap<>();
+  appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
+  writer.writeApplicationACLs(appAcls);
+  writer.append(new AggregatedLogFormat.LogKey(containerId),
+  new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
+  UserGroupInformation.getCurrentUser().getShortUserName()));
+}
   }
 
   private static void 
uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi,
@@ -1365,23 +1365,23 @@ public class TestLogsCLI {
 Path path =
 new Path(appDir, LogAggregationUtils.getNodeString(nodeId)
 + System.currentTimeMillis());
-AggregatedLogFormat.LogWriter writer =
-new AggregatedLogFormat.LogWriter(configuration, path, ugi);
-writer.writeApplicationOwner(ugi.getUserName());
-
-Map<ApplicationAccessType, String> appAcls =
-new HashMap<ApplicationAccessType, String>();
-appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
-writer.writeApplicationACLs(appAcls);
-DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
-new AggregatedLogFormat.LogKey(containerId).write(out);
-out.close();
-out = writer.getWriter().prepareAppendValue(-1);
-new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
-  UserGroupInformation.getCurrentUser().getShortUserName()).write(out,
-  new HashSet());
-out.close();
-writer.close();
+try (AggregatedLogFormat.LogWriter writer =
+ new AggregatedLogFormat.LogWriter()) {
+  writer.initialize(configuration, path, ugi);
+  writer.writeApplicationOwner(ugi.getUserName());
+
+  Map<ApplicationAccessType, String> appAcls = new HashMap<>();
+  appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
+  writer.writeApplicationACLs(appAcls);
+  DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
+  new 
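The fix above converts the manually closed LogWriter into a try-with-resources block so the file is closed even when an append throws. A minimal sketch of the same idiom with a hypothetical RecordWriter type (not the YARN class):

import java.io.Closeable;
import java.io.IOException;

class RecordWriter implements Closeable {
  void initialize(String path) throws IOException { /* open resources */ }
  void append(String key, String value) throws IOException { /* write */ }
  @Override public void close() { /* release resources */ }
}

public class WriterExample {
  public static void main(String[] args) throws IOException {
    // try-with-resources guarantees close() even if initialize()
    // or append() throws. That is the bug class YARN-6288 fixes:
    // an exception no longer leaks a half-open aggregated log file.
    try (RecordWriter writer = new RecordWriter()) {
      writer.initialize("/tmp/app-logs");   // hypothetical path
      writer.append("container_01", "stdout contents");
    }
  }
}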

[25/50] [abbrv] hadoop git commit: HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.

2017-04-17 Thread inigoiri
HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4d65779
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4d65779
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4d65779

Branch: refs/heads/HDFS-10467
Commit: b4d65779841d824d0bff5f106b2888e8d4dbe8a8
Parents: 285478a
Author: Wei-Chiu Chuang 
Authored: Mon Apr 3 07:57:28 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:21 2017 -0700

--
 .../apache/hadoop/fs/shell/SetReplication.java  | 17 ++--
 .../hadoop/hdfs/TestSetrepIncreasing.java   | 44 
 2 files changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4d65779/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
index 2231c58..16e6e92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
@@ -85,11 +85,20 @@ class SetReplication extends FsCommand {
 }
 
 if (item.stat.isFile()) {
-  if (!item.fs.setReplication(item.path, newRep)) {
-throw new IOException("Could not set replication for: " + item);
+  // Do the checking if the file is erasure coded since
+  // replication factor for an EC file is meaningless.
+  if (!item.stat.isErasureCoded()) {
+if (!item.fs.setReplication(item.path, newRep)) {
+  throw new IOException("Could not set replication for: " + item);
+}
+out.println("Replication " + newRep + " set: " + item);
+if (waitOpt) {
+  waitList.add(item);
+}
+  } else {
+out.println("Did not set replication for: " + item
++ ", because it's an erasure coded file.");
   }
-  out.println("Replication " + newRep + " set: " + item);
-  if (waitOpt) waitList.add(item);
 } 
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4d65779/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
index fee30b5..50d7b27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -28,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
@@ -102,4 +105,45 @@ public class TestSetrepIncreasing {
   cluster.shutdown();
 }
  }
+
+  @Test
+  public void testSetRepOnECFile() throws Exception {
+ClientProtocol client;
+Configuration conf = new HdfsConfiguration();
+conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+StripedFileTestUtil.getDefaultECPolicy().getName());
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+.build();
+cluster.waitActive();
+client = NameNodeProxies.createProxy(conf,
+cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+client.setErasureCodingPolicy("/",
+StripedFileTestUtil.getDefaultECPolicy().getName());
+
+FileSystem dfs = cluster.getFileSystem();
+try {
+  Path d = new Path("/tmp");
+  dfs.mkdirs(d);
+  Path f = new Path(d, "foo");
+  dfs.createNewFile(f);
+  FileStatus file = dfs.getFileStatus(f);
+  assertTrue(file.isErasureCoded());
+
+  ByteArrayOutputStream out = new ByteArrayOutputStream();
+  System.setOut(new PrintStream(out));
+  String[] args = 
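Callers of the FileSystem API can apply the same guard the shell now uses: consult FileStatus#isErasureCoded() before setReplication(), since a replication factor is meaningless for a striped file. A hedged sketch; the path is invented and a reachable HDFS client configuration is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetRepIfReplicated {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/foo");          // hypothetical path
    FileStatus st = fs.getFileStatus(file);
    if (st.isErasureCoded()) {
      // Mirrors the new shell behavior: report and skip.
      System.out.println("Did not set replication for " + file
          + ": erasure coded file");
    } else if (!fs.setReplication(file, (short) 5)) {
      throw new java.io.IOException("Could not set replication for " + file);
    }
  }
}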

[04/50] [abbrv] hadoop git commit: HADOOP-11794. Enable distcp to copy blocks in parallel. Contributed by Yongjun Zhang, Wei-Chiu Chuang, Xiao Chen.

2017-04-17 Thread inigoiri
HADOOP-11794. Enable distcp to copy blocks in parallel. Contributed by Yongjun 
Zhang, Wei-Chiu Chuang, Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92421f27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92421f27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92421f27

Branch: refs/heads/HDFS-10467
Commit: 92421f27929388af769ca978c317c01597fdac91
Parents: df919ee
Author: Yongjun Zhang 
Authored: Thu Mar 30 17:01:15 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:19 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  22 +-
 .../org/apache/hadoop/tools/CopyListing.java|  37 +-
 .../hadoop/tools/CopyListingFileStatus.java |  87 -
 .../java/org/apache/hadoop/tools/DistCp.java|  52 +++
 .../apache/hadoop/tools/DistCpOptionSwitch.java |  10 +
 .../org/apache/hadoop/tools/DistCpOptions.java  |  22 +-
 .../org/apache/hadoop/tools/OptionsParser.java  |  36 +-
 .../apache/hadoop/tools/SimpleCopyListing.java  |  83 +++--
 .../hadoop/tools/mapred/CopyCommitter.java  | 174 -
 .../apache/hadoop/tools/mapred/CopyMapper.java  |  40 +-
 .../tools/mapred/RetriableFileCopyCommand.java  |  26 +-
 .../tools/mapred/UniformSizeInputFormat.java|   5 +-
 .../apache/hadoop/tools/util/DistCpUtils.java   | 111 +-
 .../src/site/markdown/DistCp.md.vm  |   1 +
 .../apache/hadoop/tools/TestDistCpSystem.java   | 368 +--
 .../apache/hadoop/tools/TestOptionsParser.java  |   2 +-
 .../hadoop/tools/mapred/TestCopyCommitter.java  |   5 +-
 17 files changed, 971 insertions(+), 110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92421f27/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 1329195..9b782f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -862,7 +862,27 @@ public class DFSTestUtil {
   out.write(toAppend);
 }
   }
-  
+
+  /**
+   * Append specified length of bytes to a given file, starting with new block.
+   * @param fs The file system
+   * @param p Path of the file to append
+   * @param length Length of bytes to append to the file
+   * @throws IOException
+   */
+  public static void appendFileNewBlock(DistributedFileSystem fs,
+  Path p, int length) throws IOException {
+assert fs.exists(p);
+assert length >= 0;
+byte[] toAppend = new byte[length];
+Random random = new Random();
+random.nextBytes(toAppend);
+try (FSDataOutputStream out = fs.append(p,
+EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)) {
+  out.write(toAppend);
+}
+  }
+
   /**
* @return url content as string (UTF-8 encoding assumed)
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92421f27/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
index 481aa61..9ebf9d2 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java
@@ -145,12 +145,22 @@ public abstract class CopyListing extends Configured {
 Configuration config = getConf();
 FileSystem fs = pathToListFile.getFileSystem(config);
 
-Path sortedList = DistCpUtils.sortListing(fs, config, pathToListFile);
+final boolean splitLargeFile = options.splitLargeFile();
+
+// When splitLargeFile is enabled, we don't randomize the copylist
+// earlier, so we don't do the sorting here. For a file that has
+// multiple entries due to split, we check here that their
+// <chunkOffset, chunkLength> is continuous.
+//
+Path checkPath = splitLargeFile?
+pathToListFile : DistCpUtils.sortListing(fs, config, pathToListFile);
 
 SequenceFile.Reader reader = new SequenceFile.Reader(
-  config, SequenceFile.Reader.file(sortedList));
+  config, SequenceFile.Reader.file(checkPath));
 try {
   Text lastKey = new Text("*"); //source relative path can never hold *
+  long lastChunkOffset = -1;
+  long 
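The check that the hunk above begins walks the unsorted copy listing and verifies that the split entries of one file are contiguous. A standalone sketch of that invariant with invented field names, independent of the SequenceFile plumbing:

import java.util.Arrays;
import java.util.List;

public class ChunkContinuityCheck {
  /** Minimal stand-in for one copy-listing entry of a split file. */
  static class Chunk {
    final String path; final long offset; final long length;
    Chunk(String path, long offset, long length) {
      this.path = path; this.offset = offset; this.length = length;
    }
  }

  /** True if consecutive chunks of the same path line up end-to-start. */
  static boolean isContinuous(List<Chunk> listing) {
    String lastPath = null;
    long expectedOffset = -1;
    for (Chunk c : listing) {
      if (c.path.equals(lastPath) && c.offset != expectedOffset) {
        return false; // gap or overlap between split chunks
      }
      lastPath = c.path;
      expectedOffset = c.offset + c.length;
    }
    return true;
  }

  public static void main(String[] args) {
    List<Chunk> ok = Arrays.asList(
        new Chunk("/a", 0, 128), new Chunk("/a", 128, 128));
    System.out.println(isContinuous(ok)); // true: 0+128 == 128
  }
}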

[08/50] [abbrv] hadoop git commit: HDFS-11603. Improve slow mirror/disk warnings in BlockReceiver.

2017-04-17 Thread inigoiri
HDFS-11603. Improve slow mirror/disk warnings in BlockReceiver.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70708956
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70708956
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70708956

Branch: refs/heads/HDFS-10467
Commit: 707089566cbfa6abbb77760f78f80556f175aeca
Parents: 98c25b7
Author: Arpit Agarwal 
Authored: Fri Mar 31 12:10:20 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:20 2017 -0700

--
 .../hadoop/hdfs/protocol/DatanodeInfo.java  |  1 +
 .../hdfs/server/datanode/BlockReceiver.java | 61 +++-
 2 files changed, 47 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70708956/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index e1698c9..0a8c915 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -55,6 +55,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private String softwareVersion;
  private List<String> dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
+  public static final DatanodeInfo[] EMPTY_ARRAY = {};
 
   // Datanode administrative states
   public enum AdminStates {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70708956/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index bb6bd55..00109e0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -100,6 +100,7 @@ class BlockReceiver implements Closeable {
   private DataTransferThrottler throttler;
   private ReplicaOutputStreams streams;
   private DatanodeInfo srcDataNode = null;
+  private DatanodeInfo[] downstreamDNs = DatanodeInfo.EMPTY_ARRAY;
   private final DataNode datanode;
   volatile private boolean mirrorError;
 
@@ -424,10 +425,10 @@ class BlockReceiver implements Closeable {
   }
 }
 long duration = Time.monotonicNow() - begin;
-if (duration > datanodeSlowLogThresholdMs) {
+if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) {
   LOG.warn("Slow flushOrSync took " + duration + "ms (threshold="
   + datanodeSlowLogThresholdMs + "ms), isSync:" + isSync + ", 
flushTotalNanos="
-  + flushTotalNanos + "ns");
+  + flushTotalNanos + "ns, volume=" + getVolumeBaseUri());
 }
   }
 
@@ -578,9 +579,10 @@ class BlockReceiver implements Closeable {
 mirrorAddr,
 duration);
 trackSendPacketToLastNodeInPipeline(duration);
-if (duration > datanodeSlowLogThresholdMs) {
+if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) {
   LOG.warn("Slow BlockReceiver write packet to mirror took " + duration
-  + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
+  + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), "
+  + "downstream DNs=" + Arrays.toString(downstreamDNs));
 }
   } catch (IOException e) {
 handleMirrorOutError(e);
@@ -711,9 +713,10 @@ class BlockReceiver implements Closeable {
   streams.writeDataToDisk(dataBuf.array(),
   startByteToDisk, numBytesToDisk);
   long duration = Time.monotonicNow() - begin;
-  if (duration > datanodeSlowLogThresholdMs) {
+  if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) {
 LOG.warn("Slow BlockReceiver write data to disk cost:" + duration
-+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
++ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), "
++ "volume=" + getVolumeBaseUri());
   }
 
   if (duration > maxWriteToDiskMs) {
@@ -902,9 +905,10 @@ class BlockReceiver implements Closeable {
 }
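The recurring pattern in this patch: time the operation on a monotonic clock and only build the detailed warning when the duration crosses the threshold and WARN is enabled. A generic sketch; the threshold and volume name are invented:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SlowOpLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(SlowOpLogging.class);
  private static final long SLOW_THRESHOLD_MS = 300; // hypothetical

  static void writePacket() { /* simulated disk write */ }

  public static void main(String[] args) {
    long begin = System.nanoTime() / 1_000_000; // monotonic ms
    writePacket();
    long duration = System.nanoTime() / 1_000_000 - begin;
    // Guarding with isWarnEnabled() avoids the string concatenation
    // (and Arrays.toString of the pipeline) when WARN is disabled.
    if (duration > SLOW_THRESHOLD_MS && LOG.isWarnEnabled()) {
      LOG.warn("Slow write took " + duration + "ms (threshold="
          + SLOW_THRESHOLD_MS + "ms), volume=/data/1"); // invented volume
    }
  }
}

Naming the volume and the downstream datanodes in the message is the point of the change: the warning becomes actionable instead of just noting that something, somewhere, was slow.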
 

[22/50] [abbrv] hadoop git commit: YARN-6109. Add an ability to convert ChildQueue to ParentQueue. (Xuan Gong via wangda)

2017-04-17 Thread inigoiri
YARN-6109. Add an ability to convert ChildQueue to ParentQueue. (Xuan Gong via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9544fac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9544fac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9544fac

Branch: refs/heads/HDFS-10467
Commit: f9544fac483678dc6df7f3116fb89b1e41b3c7ba
Parents: b8a0b18
Author: Wangda Tan 
Authored: Tue Apr 4 14:38:45 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:21 2017 -0700

--
 .../capacity/CapacitySchedulerQueueManager.java |  13 +++
 .../scheduler/capacity/ParentQueue.java |  17 ++-
 .../capacity/TestCapacityScheduler.java | 103 +++
 3 files changed, 132 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9544fac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index 76cb5d6..c92c343 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -264,6 +264,8 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<
   /**
* Ensure all existing queues are present. Queues cannot be deleted if its 
not
* in Stopped state, Queue's cannot be moved from one hierarchy to other 
also.
+   * Previous child queue could be converted into parent queue if it is in
+   * STOPPED state.
*
* @param queues existing queues
* @param newQueues new queues
@@ -292,6 +294,17 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<
   throw new IOException(queueName + " is moved from:"
   + oldQueue.getQueuePath() + " to:" + newQueue.getQueuePath()
   + " after refresh, which is not allowed.");
+} else  if (oldQueue instanceof LeafQueue
+&& newQueue instanceof ParentQueue) {
+  if (oldQueue.getState() == QueueState.STOPPED) {
+LOG.info("Converting the leaf queue: " + oldQueue.getQueuePath()
++ " to parent queue.");
+  } else {
+throw new IOException("Can not convert the leaf queue: "
++ oldQueue.getQueuePath() + " to parent queue since "
++ "it is not yet in stopped state. Current State : "
++ oldQueue.getState());
+  }
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9544fac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index f84b7a4..1579472 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -315,7 +315,22 @@ public class ParentQueue extends AbstractCSQueue {
 
 // Check if the child-queue already exists
 if (childQueue != null) {
-  // Re-init existing child queues
+  // Check if the child-queue has been converted into parent queue.
+  // The CS has already checked to ensure that this child-queue is in
+ 
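The refresh-time rule reduces to: a leaf queue may be replaced by a parent queue only when the old queue is STOPPED. A condensed sketch with simplified stand-ins for the CapacityScheduler types:

import java.io.IOException;

public class QueueConversionCheck {
  enum QueueState { RUNNING, STOPPED }

  static class Queue {
    final String path; final QueueState state; final boolean leaf;
    Queue(String path, QueueState state, boolean leaf) {
      this.path = path; this.state = state; this.leaf = leaf;
    }
  }

  static void validate(Queue oldQ, Queue newQ) throws IOException {
    // Converting leaf -> parent is only safe once the leaf has been
    // stopped, so no applications can still be running inside it.
    if (oldQ.leaf && !newQ.leaf && oldQ.state != QueueState.STOPPED) {
      throw new IOException("Cannot convert leaf queue " + oldQ.path
          + " to a parent queue: current state is " + oldQ.state);
    }
  }

  public static void main(String[] args) throws IOException {
    validate(new Queue("root.a", QueueState.STOPPED, true),
             new Queue("root.a", QueueState.STOPPED, false)); // allowed
  }
}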

[10/50] [abbrv] hadoop git commit: YARN-6411. Clean up the overwrite of createDispatcher() in subclass of MockRM. Contributed by Yufei Gu

2017-04-17 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0db5dd2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
index b4adf48..75ef5c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestNodeBlacklistingOnAMFailures.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.event.Dispatcher;
-import org.apache.hadoop.yarn.event.DrainDispatcher;
 import 
org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRestart;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -65,8 +63,7 @@ public class TestNodeBlacklistingOnAMFailures {
 conf.setBoolean(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED,
 true);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
 // Register 5 nodes, so that we can blacklist atleast one if AM container
@@ -122,7 +119,7 @@ public class TestNodeBlacklistingOnAMFailures {
 // Try the current node a few times
 for (int i = 0; i <= 2; i++) {
   currentNode.nodeHeartbeat(true);
-  dispatcher.await();
+  rm.drainEvents();
 
   Assert.assertEquals(
   "AppAttemptState should still be SCHEDULED if currentNode is "
@@ -132,7 +129,7 @@ public class TestNodeBlacklistingOnAMFailures {
 
 // Now try the other node
 otherNode.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 // Now the AM container should be allocated
 MockRM.waitForState(attempt, RMAppAttemptState.ALLOCATED, 2);
@@ -169,8 +166,7 @@ public class TestNodeBlacklistingOnAMFailures {
 conf.setBoolean(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED,
 true);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
 // Register 5 nodes, so that we can blacklist atleast one if AM container
@@ -227,7 +223,7 @@ public class TestNodeBlacklistingOnAMFailures {
 System.out.println("New AppAttempt launched " + attempt.getAppAttemptId());
 
 nm2.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 // Now the AM container should be allocated
 MockRM.waitForState(attempt, RMAppAttemptState.ALLOCATED, 2);
@@ -257,8 +253,7 @@ public class TestNodeBlacklistingOnAMFailures {
 conf.setBoolean(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED,
 true);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 CapacityScheduler scheduler = (CapacityScheduler) 
rm.getResourceScheduler();
 
 // Register 5 nodes, so that we can blacklist atleast one if AM container
@@ -319,7 +314,7 @@ public class TestNodeBlacklistingOnAMFailures {
 nm3.nodeHeartbeat(true);
 nm4.nodeHeartbeat(true);
 nm5.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 // Now the AM container should be allocated
 MockRM.waitForState(attempt, RMAppAttemptState.ALLOCATED, 2);
@@ -352,8 +347,7 @@ public class TestNodeBlacklistingOnAMFailures {
 1.5f);
 conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 100);
 
-DrainDispatcher dispatcher = new DrainDispatcher();
-MockRM rm = startRM(conf, dispatcher);
+MockRM rm = startRM(conf);
 
 MockNM node =
 new MockNM("127.0.0.1:1234", 8000, rm.getResourceTrackerService());
@@ -367,7 +361,7 @@ public class TestNodeBlacklistingOnAMFailures {
 // Now the AM container should be allocated
 RMAppAttempt attempt = MockRM.waitForAttemptScheduled(app, rm);
 

[11/50] [abbrv] hadoop git commit: YARN-6411. Clean up the overwrite of createDispatcher() in subclass of MockRM. Contributed by Yufei Gu

2017-04-17 Thread inigoiri
YARN-6411. Clean up the overwrite of createDispatcher() in subclass of MockRM. 
Contributed by Yufei Gu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0db5dd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0db5dd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0db5dd2

Branch: refs/heads/HDFS-10467
Commit: a0db5dd2732849a268153154fef89167cc39351b
Parents: af40cfa
Author: Jason Lowe 
Authored: Fri Mar 31 10:05:34 2017 -0500
Committer: Inigo 
Committed: Thu Apr 6 18:58:20 2017 -0700

--
 .../v2/app/rm/TestRMContainerAllocator.java | 453 +--
 .../api/impl/TestAMRMClientOnRMRestart.java |  59 +--
 .../server/resourcemanager/ACLsTestBase.java|  10 -
 .../server/resourcemanager/RMHATestBase.java|  20 +-
 .../ReservationACLsTestBase.java|   5 +-
 .../resourcemanager/TestApplicationCleanup.java |  44 +-
 .../TestApplicationMasterLauncher.java  |  11 +-
 .../TestApplicationMasterService.java   |  19 +-
 .../TestNodeBlacklistingOnAMFailures.java   |  41 +-
 .../TestReservationSystemWithRMHA.java  |   5 +-
 .../TestAMRMRPCNodeUpdates.java |  18 +-
 .../resourcetracker/TestNMReconnect.java|  14 +-
 .../rmcontainer/TestRMContainerImpl.java|   1 -
 .../capacity/TestApplicationPriority.java   |  29 +-
 .../security/TestClientToAMTokens.java  |  23 +-
 15 files changed, 277 insertions(+), 475 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0db5dd2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index e6aee6e..933bd01 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -179,21 +179,19 @@ public class TestRMContainerAllocator {
 Configuration conf = new Configuration();
 MyResourceManager rm = new MyResourceManager(conf);
 rm.start();
-DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
-.getDispatcher();
 
 // Submit the application
 RMApp app = rm.submitApp(1024);
-dispatcher.await();
+rm.drainEvents();
 
 MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
 amNodeManager.nodeHeartbeat(true);
-dispatcher.await();
+rm.drainEvents();
 
 ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
 .getAppAttemptId();
 rm.sendAMLaunched(appAttemptId);
-dispatcher.await();
+rm.drainEvents();
 
 JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
 Job mockJob = mock(Job.class);
@@ -207,7 +205,7 @@ public class TestRMContainerAllocator {
 MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
 MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
 MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
-dispatcher.await();
+rm.drainEvents();
 
 // create the container request
 ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
@@ -222,7 +220,7 @@ public class TestRMContainerAllocator {
 // this tells the scheduler about the requests
 // as nodes are not added, no allocations
 List assigned = allocator.schedule();
-dispatcher.await();
+rm.drainEvents();
 Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());
 
@@ -234,7 +232,7 @@ public class TestRMContainerAllocator {
 // this tells the scheduler about the requests
 // as nodes are not added, no allocations
 assigned = allocator.schedule();
-dispatcher.await();
+rm.drainEvents();
 Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
 Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
 
@@ -242,18 +240,18 @@ public class TestRMContainerAllocator {
 nodeManager1.nodeHeartbeat(true); // Node heartbeat
 nodeManager2.nodeHeartbeat(true); // Node heartbeat
 nodeManager3.nodeHeartbeat(true); // Node heartbeat
-
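After this cleanup a test no longer injects its own DrainDispatcher; it calls MockRM#drainEvents() on the RM's dispatcher, as the diff shows. A hedged outline of the resulting test shape, using YARN's MockRM/MockNM test utilities with illustrative registration parameters:

// Outline against YARN's test utilities, not a standalone program.
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;

public class DrainEventsOutline {
  public static void main(String[] args) throws Exception {
    MockRM rm = new MockRM(new YarnConfiguration());
    rm.start();
    MockNM nm = rm.registerNode("127.0.0.1:1234", 8192);
    nm.nodeHeartbeat(true);
    rm.drainEvents();   // replaces dispatcher.await(): no custom
                        // DrainDispatcher has to be wired into the RM
    rm.stop();
  }
}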

[18/50] [abbrv] hadoop git commit: HADOOP-14272. Azure: WasbRemoteCallHelper should use String equals for comparison. Contributed by Santhosh G Nayak

2017-04-17 Thread inigoiri
HADOOP-14272. Azure: WasbRemoteCallHelper should use String equals for 
comparison. Contributed by Santhosh G Nayak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5e97cfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5e97cfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5e97cfe

Branch: refs/heads/HDFS-10467
Commit: c5e97cfe656815bd5be50e19b8eec60f4dc6eaa9
Parents: 53a513f
Author: Mingliang Liu 
Authored: Tue Apr 4 11:03:59 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:21 2017 -0700

--
 .../java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e97cfe/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
index 09ea084..b43e5ae 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/WasbRemoteCallHelper.java
@@ -88,7 +88,8 @@ class WasbRemoteCallHelper {
   }
 
   Header contentTypeHeader = response.getFirstHeader("Content-Type");
-  if (contentTypeHeader == null || contentTypeHeader.getValue() != 
APPLICATION_JSON) {
+  if (contentTypeHeader == null
+  || !APPLICATION_JSON.equals(contentTypeHeader.getValue())) {
 throw new WasbRemoteCallException(getRequest.getURI().toString() + ":" 
+
 "Content-Type mismatch: expected: " + APPLICATION_JSON +
 ", got " + ((contentTypeHeader!=null) ? 
contentTypeHeader.getValue() : "NULL")





[01/50] [abbrv] hadoop git commit: HDFS-11551. Handle SlowDiskReport from DataNode at the NameNode. Contributed by Hanisha Koneru.

2017-04-17 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 6c399a88e -> 0e4661f7f


HDFS-11551. Handle SlowDiskReport from DataNode at the NameNode. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af40cfa1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af40cfa1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af40cfa1

Branch: refs/heads/HDFS-10467
Commit: af40cfa1cb96a3f70eae867576090421df1abfeb
Parents: 20e57a9
Author: Hanisha Koneru 
Authored: Thu Mar 30 22:41:26 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:19 2017 -0700

--
 .../hdfs/server/protocol/SlowDiskReports.java   |  28 +-
 .../server/blockmanagement/DatanodeManager.java |  34 +-
 .../server/blockmanagement/SlowDiskTracker.java | 291 
 .../datanode/metrics/DataNodeDiskMetrics.java   |  35 +-
 .../blockmanagement/TestSlowDiskTracker.java| 448 +++
 5 files changed, 812 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af40cfa1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index ef4d09e..8095c2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -48,7 +48,7 @@ public final class SlowDiskReports {
  private final Map<String, Map<DiskOp, Double>> slowDisks;
 
   /**
-   * An object representing a SlowPeerReports with no entries. Should
+   * An object representing a SlowDiskReports with no entries. Should
* be used instead of null or creating new objects when there are
* no slow peers to report.
*/
@@ -119,8 +119,28 @@ public final class SlowDiskReports {
* Lists the types of operations on which disk latencies are measured.
*/
   public enum DiskOp {
-METADATA,
-READ,
-WRITE
+METADATA("MetadataOp"),
+READ("ReadIO"),
+WRITE("WriteIO");
+
+private final String value;
+
+DiskOp(final String v) {
+  this.value = v;
+}
+
+@Override
+public String toString() {
+  return value;
+}
+
+public static DiskOp fromValue(final String value) {
+  for (DiskOp as : DiskOp.values()) {
+if (as.value.equals(value)) {
+  return as;
+}
+  }
+  return null;
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af40cfa1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index e22b7af..18135a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -38,6 +38,7 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
@@ -180,9 +181,15 @@ public class DatanodeManager {
* True if we should process latency metrics from downstream peers.
*/
   private final boolean dataNodePeerStatsEnabled;
+  /**
+   *  True if we should process latency metrics from individual DN disks.
+   */
+  private final boolean dataNodeDiskStatsEnabled;
 
   @Nullable
   private final SlowPeerTracker slowPeerTracker;
+  @Nullable
+  private final SlowDiskTracker slowDiskTracker;
   
   /**
* The minimum time between resending caching directives to Datanodes,
@@ -208,9 +215,16 @@ public class DatanodeManager {
 this.dataNodePeerStatsEnabled = 

[45/50] [abbrv] hadoop git commit: HDFS-11131. TestThrottledAsyncChecker#testCancellation is flaky.

2017-04-17 Thread inigoiri
HDFS-11131. TestThrottledAsyncChecker#testCancellation is flaky.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ae4d414
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ae4d414
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ae4d414

Branch: refs/heads/HDFS-10467
Commit: 6ae4d414f321efa0edcf69abb649cb6dbfe8f3eb
Parents: a8d602e
Author: Arpit Agarwal 
Authored: Wed Apr 5 16:01:54 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../datanode/checker/ThrottledAsyncChecker.java |  25 ++--
 .../checker/TestThrottledAsyncChecker.java  | 118 ++-
 2 files changed, 43 insertions(+), 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae4d414/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index 7584d97..b71c015 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -187,28 +187,21 @@ public class ThrottledAsyncChecker implements 
AsyncChecker {
 
   /**
* {@inheritDoc}.
+   *
+   * The results of in-progress checks are not useful during shutdown,
+   * so we optimize for faster shutdown by interrupting all actively
+   * executing checks.
*/
   @Override
   public void shutdownAndWait(long timeout, TimeUnit timeUnit)
   throws InterruptedException {
-// Try orderly shutdown.
-executorService.shutdown();
-
-if (!executorService.awaitTermination(timeout, timeUnit)) {
-  // Interrupt executing tasks and wait again.
-  executorService.shutdownNow();
-  executorService.awaitTermination(timeout, timeUnit);
-}
 if (scheduledExecutorService != null) {
-  // Try orderly shutdown
-  scheduledExecutorService.shutdown();
-
-  if (!scheduledExecutorService.awaitTermination(timeout, timeUnit)) {
-// Interrupt executing tasks and wait again.
-scheduledExecutorService.shutdownNow();
-scheduledExecutorService.awaitTermination(timeout, timeUnit);
-  }
+  scheduledExecutorService.shutdownNow();
+  scheduledExecutorService.awaitTermination(timeout, timeUnit);
 }
+
+executorService.shutdownNow();
+executorService.awaitTermination(timeout, timeUnit);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae4d414/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
index 00b1af2..4ed6371 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.FakeTimer;
@@ -29,12 +27,9 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.annotation.Nonnull;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import static org.junit.Assert.assertFalse;
@@ -94,34 +89,8 @@ public class TestThrottledAsyncChecker {
   }
 
   @Test (timeout=6)
-  public void testCancellation() throws Exception {
-LatchedCheckable target = new LatchedCheckable();
-final FakeTimer timer = new FakeTimer();
-final 
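The patch drops the two-phase orderly shutdown in favor of shutdownNow(), since in-flight disk checks are worthless once the process is exiting. The generic form of that pattern:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FastShutdown {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    pool.submit(() -> { /* long-running check */ });

    // shutdownNow() interrupts running tasks immediately instead of
    // waiting for them to finish: their results are not useful at
    // shutdown, and skipping the orderly phase removes a timing
    // window that made the cancellation test flaky.
    pool.shutdownNow();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}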

[41/50] [abbrv] hadoop git commit: HDFS-11596. hadoop-hdfs-client jar is in the wrong directory in release tarball. Contributed by Yuanbo Liu.

2017-04-17 Thread inigoiri
HDFS-11596. hadoop-hdfs-client jar is in the wrong directory in release 
tarball. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/546d5339
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/546d5339
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/546d5339

Branch: refs/heads/HDFS-10467
Commit: 546d533928319f9ef2cb0d8fb0c31715c6738d17
Parents: e054888
Author: Andrew Wang 
Authored: Wed Apr 5 16:04:09 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  | 5 +
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 5 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 2 +-
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml| 5 +
 .../hadoop-yarn-applications-distributedshell/pom.xml   | 5 +
 .../hadoop-yarn-server-resourcemanager/pom.xml  | 5 +
 .../hadoop-yarn-server-timeline-pluginstorage/pom.xml   | 5 +
 7 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/546d5339/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index f5cfd01..a05304b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -173,6 +173,11 @@
 
 
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <scope>test</scope>
       <type>test-jar</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/546d5339/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
index aceb5bc..403f097 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
@@ -57,6 +57,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
 
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
       <type>test-jar</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/546d5339/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 65c3943..9b6c058 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -60,7 +60,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>compile</scope>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/546d5339/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index 0d9e789..1d692a5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -105,6 +105,11 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>com.google.inject.extensions</groupId>
       <artifactId>guice-servlet</artifactId>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/546d5339/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index a564b82..b3db4e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -153,6 +153,11 @@
 
 
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
       <type>test-jar</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/546d5339/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 

[27/50] [abbrv] hadoop git commit: HDFS-9651. All web UIs should include a robots.txt file. Contributed by Lars Francke.

2017-04-17 Thread inigoiri
HDFS-9651. All web UIs should include a robots.txt file. Contributed by Lars 
Francke.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69e0934f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69e0934f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69e0934f

Branch: refs/heads/HDFS-10467
Commit: 69e0934fc7310de92279490ba5b3b7687187ddad
Parents: bf4ed74
Author: Akira Ajisaka 
Authored: Wed Apr 5 12:45:28 2017 +0900
Committer: Inigo 
Committed: Thu Apr 6 18:58:22 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69e0934f/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 9e59a31..65c3943 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -386,6 +386,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
             <exclude>src/test/resources/editsStored*</exclude>
             <exclude>src/test/resources/empty-file</exclude>
             <exclude>src/main/webapps/datanode/robots.txt</exclude>
+            <exclude>src/main/webapps/hdfs/robots.txt</exclude>
+            <exclude>src/main/webapps/journal/robots.txt</exclude>
+            <exclude>src/main/webapps/secondary/robots.txt</exclude>
             <exclude>src/contrib/**</exclude>
             <exclude>src/site/resources/images/*</exclude>
             <exclude>src/main/webapps/static/bootstrap-3.0.2/**</exclude>





[05/50] [abbrv] hadoop git commit: HDFS-11592. Closing a file has a wasteful preconditions in NameNode. Contributed by Eric Badger

2017-04-17 Thread inigoiri
HDFS-11592. Closing a file has a wasteful preconditions in NameNode. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df919ee7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df919ee7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df919ee7

Branch: refs/heads/HDFS-10467
Commit: df919ee734289072c42a7133889a3c2005b7ae77
Parents: 2ad28df
Author: Mingliang Liu 
Authored: Thu Mar 30 15:44:06 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:19 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/INodeFile.java| 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df919ee7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 2b0e0ad..3da6aa7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -328,9 +328,11 @@ public class INodeFile extends INodeWithAdditionalFields
 for (int i = 0; i < blocks.length; i++) {
   final String err = checkBlockComplete(blocks, i, numCommittedAllowed,
   minReplication);
-  Preconditions.checkState(err == null,
-  "Unexpected block state: %s, file=%s (%s), blocks=%s (i=%s)",
-  err, this, getClass().getSimpleName(), Arrays.asList(blocks), i);
+  if(err != null) {
+throw new IllegalStateException(String.format("Unexpected block state: 
" +
+"%s, file=%s (%s), blocks=%s (i=%s)", err, this,
+getClass().getSimpleName(), Arrays.asList(blocks), i));
+  }
 }
   }
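Why the old call was wasteful: Preconditions.checkState(boolean, template, Object...) defers String.format until failure, but the varargs array and the Arrays.asList(blocks) argument were still built on every invocation of this hot path. A plain if builds the message only when the invariant is broken. In miniature:

public class LazyCheckMessage {
  static void closeFile(String err, Object[] blocks) {
    // Before: Preconditions.checkState(err == null, "...%s", asList(blocks))
    // allocated the varargs array and the list on every call.
    // After: nothing is allocated unless the invariant is broken.
    if (err != null) {
      throw new IllegalStateException(String.format(
          "Unexpected block state: %s, blocks=%s",
          err, java.util.Arrays.asList(blocks)));
    }
  }

  public static void main(String[] args) {
    closeFile(null, new Object[] {"blk_1", "blk_2"}); // common case: no work
  }
}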
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: MAPREDUCE-6824. TaskAttemptImpl#createCommonContainerLaunchContext is longer than 150 lines. Contributed by Chris Trezzo.

2017-04-17 Thread inigoiri
MAPREDUCE-6824. TaskAttemptImpl#createCommonContainerLaunchContext is longer 
than 150 lines. Contributed by Chris Trezzo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b87e1a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b87e1a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b87e1a2

Branch: refs/heads/HDFS-10467
Commit: 2b87e1a219a46de8537e53f774943c509bc0d678
Parents: 51456bf
Author: Akira Ajisaka 
Authored: Mon Apr 3 13:06:24 2017 +0900
Committer: Inigo 
Committed: Thu Apr 6 18:58:20 2017 -0700

--
 .../v2/app/job/impl/TaskAttemptImpl.java| 285 ++-
 1 file changed, 153 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b87e1a2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 4305824..9ea1b9a 100755
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -755,7 +755,7 @@ public abstract class TaskAttemptImpl implements
 new HashMap();
 
 // Application environment
-Map<String, String> environment = new HashMap<String, String>();
+Map<String, String> environment;
 
 // Service data
 Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
@@ -763,157 +763,178 @@ public abstract class TaskAttemptImpl implements
 // Tokens
 ByteBuffer taskCredentialsBuffer = ByteBuffer.wrap(new byte[]{});
 try {
-  FileSystem remoteFS = FileSystem.get(conf);
-
-  //  Set up JobJar to be localized properly on the remote NM.
-  String jobJar = conf.get(MRJobConfig.JAR);
-  if (jobJar != null) {
-final Path jobJarPath = new Path(jobJar);
-final FileSystem jobJarFs = FileSystem.get(jobJarPath.toUri(), conf);
-Path remoteJobJar = jobJarPath.makeQualified(jobJarFs.getUri(),
-jobJarFs.getWorkingDirectory());
-LocalResource rc = createLocalResource(jobJarFs, remoteJobJar,
-LocalResourceType.PATTERN, LocalResourceVisibility.APPLICATION);
-String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, 
-JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
-rc.setPattern(pattern);
-localResources.put(MRJobConfig.JOB_JAR, rc);
-LOG.info("The job-jar file on the remote FS is "
-+ remoteJobJar.toUri().toASCIIString());
-  } else {
-// Job jar may be null. For e.g, for pipes, the job jar is the hadoop
-// mapreduce jar itself which is already on the classpath.
-LOG.info("Job jar is not present. "
-+ "Not adding any jar to the list of resources.");
-  }
-  //  End of JobJar setup
-
-  //  Set up JobConf to be localized properly on the remote NM.
-  Path path =
-  MRApps.getStagingAreaDir(conf, UserGroupInformation
-  .getCurrentUser().getShortUserName());
-  Path remoteJobSubmitDir =
-  new Path(path, oldJobId.toString());
-  Path remoteJobConfPath = 
-  new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
-  localResources.put(
-  MRJobConfig.JOB_CONF_FILE,
-  createLocalResource(remoteFS, remoteJobConfPath,
-  LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
-  LOG.info("The job-conf file on the remote FS is "
-  + remoteJobConfPath.toUri().toASCIIString());
-  //  End of JobConf setup
 
-  // Setup DistributedCache
-  MRApps.setupDistributedCache(conf, localResources);
+  configureJobJar(conf, localResources);
 
-  // Setup up task credentials buffer
-  LOG.info("Adding #" + credentials.numberOfTokens()
-  + " tokens and #" + credentials.numberOfSecretKeys()
-  + " secret keys for NM use for launching container");
-  Credentials taskCredentials = new Credentials(credentials);
+  configureJobConf(conf, localResources, oldJobId);
 
-  // LocalStorageToken is needed 

[48/50] [abbrv] hadoop git commit: HADOOP-14276. Add a nanosecond API to Time/Timer/FakeTimer. Contributed by Erik Krogen.

2017-04-17 Thread inigoiri
HADOOP-14276. Add a nanosecond API to Time/Timer/FakeTimer. Contributed by Erik 
Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2e33d5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2e33d5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2e33d5f

Branch: refs/heads/HDFS-10467
Commit: b2e33d5f7b61c6124837b5cbb3af07e4e15e6163
Parents: 66b8830
Author: Zhe Zhang 
Authored: Thu Apr 6 16:52:22 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:24 2017 -0700

--
 .../apache/hadoop/util/LightWeightCache.java| 20 +---
 .../main/java/org/apache/hadoop/util/Time.java  | 10 
 .../main/java/org/apache/hadoop/util/Timer.java | 10 
 .../java/org/apache/hadoop/util/FakeTimer.java  | 24 
 .../hadoop/util/TestLightWeightCache.java   | 19 ++--
 5 files changed, 52 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2e33d5f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java
index a0a553a..d79aade 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java
@@ -76,14 +76,6 @@ public class LightWeightCache extends 
LightWeightGSet {
   return l > r? 1: l < r? -1: 0;
 }
   };
-
-  /** A clock for measuring time so that it can be mocked in unit tests. */
-  static class Clock {
-/** @return the current time. */
-long currentTime() {
-  return System.nanoTime();
-}
-  }
   
   private static int updateRecommendedLength(int recommendedLength,
   int sizeLimit) {
@@ -102,7 +94,7 @@ public class LightWeightCache extends 
LightWeightGSet {
   private final long creationExpirationPeriod;
   private final long accessExpirationPeriod;
   private final int sizeLimit;
-  private final Clock clock;
+  private final Timer timer;
 
   /**
* @param recommendedLength Recommended size of the internal array.
@@ -120,7 +112,7 @@ public class LightWeightCache extends 
LightWeightGSet {
   final long creationExpirationPeriod,
   final long accessExpirationPeriod) {
 this(recommendedLength, sizeLimit,
-creationExpirationPeriod, accessExpirationPeriod, new Clock());
+creationExpirationPeriod, accessExpirationPeriod, new Timer());
   }
 
   @VisibleForTesting
@@ -128,7 +120,7 @@ public class LightWeightCache extends 
LightWeightGSet {
   final int sizeLimit,
   final long creationExpirationPeriod,
   final long accessExpirationPeriod,
-  final Clock clock) {
+  final Timer timer) {
 super(updateRecommendedLength(recommendedLength, sizeLimit));
 
 this.sizeLimit = sizeLimit;
@@ -147,11 +139,11 @@ public class LightWeightCache extends 
LightWeightGSet {
 
 this.queue = new PriorityQueue(
 sizeLimit > 0? sizeLimit + 1: 1 << 10, expirationTimeComparator);
-this.clock = clock;
+this.timer = timer;
   }
 
   void setExpirationTime(final Entry e, final long expirationPeriod) {
-e.setExpirationTime(clock.currentTime() + expirationPeriod);
+e.setExpirationTime(timer.monotonicNowNanos() + expirationPeriod);
   }
 
   boolean isExpired(final Entry e, final long now) {
@@ -168,7 +160,7 @@ public class LightWeightCache extends 
LightWeightGSet {
 
   /** Evict expired entries. */
   private void evictExpiredEntries() {
-final long now = clock.currentTime();
+final long now = timer.monotonicNowNanos();
 for(int i = 0; i < EVICTION_LIMIT; i++) {
   final Entry peeked = queue.peek();
   if (peeked == null || !isExpired(peeked, now)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2e33d5f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index e96fa77..db5a567 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -66,6 +66,16 @@ public 
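
The LightWeightCache hunks above make the shape of this change clear: an injectable Timer replaces the cache's private Clock, and expiry computations call the new monotonicNowNanos(). A minimal, self-contained sketch of that pattern, with illustrative names rather than Hadoop's exact classes:

    // Production code depends on an injectable monotonic clock.
    class MonotonicTimer {
      long nowNanos() { return System.nanoTime(); }
    }

    class ExpiringEntry {
      private final MonotonicTimer timer;
      private final long expiresAtNanos;

      ExpiringEntry(MonotonicTimer timer, long ttlNanos) {
        this.timer = timer;
        this.expiresAtNanos = timer.nowNanos() + ttlNanos;
      }

      boolean isExpired() {
        return timer.nowNanos() >= expiresAtNanos;
      }
    }

    // A unit test substitutes a fake timer and advances it explicitly,
    // so expiry becomes deterministic instead of sleep-based.
    class FakeMonotonicTimer extends MonotonicTimer {
      private long now;
      @Override long nowNanos() { return now; }
      void advanceNanos(long delta) { now += delta; }
    }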

[38/50] [abbrv] hadoop git commit: HDFS-11362. StorageDirectory should initialize a non-null default StorageDirType. Contributed by Hanisha Koneru.

2017-04-17 Thread inigoiri
HDFS-11362. StorageDirectory should initialize a non-null default 
StorageDirType. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c477f23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c477f23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c477f23

Branch: refs/heads/HDFS-10467
Commit: 7c477f238b022bba1865bc9650fff50d4afd4065
Parents: 8c11900
Author: Xiaoyu Yao 
Authored: Thu Apr 6 14:13:22 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/common/Storage.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c477f23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 4493772..414d3a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIOException;
@@ -275,12 +276,10 @@ public abstract class Storage extends StorageInfo {
 
 private final StorageLocation location;
 public StorageDirectory(File dir) {
-  // default dirType is null
   this(dir, null, false);
 }
 
 public StorageDirectory(StorageLocation location) {
-  // default dirType is null
   this(null, false, location);
 }
 
@@ -337,7 +336,8 @@ public abstract class Storage extends StorageInfo {
 boolean isShared, StorageLocation location) {
   this.root = dir;
   this.lock = null;
-  this.dirType = dirType;
+  // default dirType is UNDEFINED
+  this.dirType = (dirType == null ? NameNodeDirType.UNDEFINED : dirType);
   this.isShared = isShared;
   this.location = location;
   assert location == null || dir == null ||
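
The fix substitutes an explicit UNDEFINED value for the old null default, so callers can branch on the directory type without a null check. A tiny sketch of that null-object idea, with illustrative types:

    enum DirType { UNDEFINED, IMAGE, EDITS }

    class Dir {
      private final DirType type;

      Dir(DirType type) {
        // Store a concrete UNDEFINED rather than null.
        this.type = (type == null) ? DirType.UNDEFINED : type;
      }

      boolean isImageDir() {
        return type == DirType.IMAGE; // safe even when the caller passed null
      }
    }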





[21/50] [abbrv] hadoop git commit: YARN-6436. TestSchedulingPolicy#testParseSchedulingPolicy timeout is too low. (Eric Badger via kasha)

2017-04-17 Thread inigoiri
YARN-6436. TestSchedulingPolicy#testParseSchedulingPolicy timeout is too low. 
(Eric Badger via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb8a9250
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb8a9250
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb8a9250

Branch: refs/heads/HDFS-10467
Commit: bb8a92504e3f70cb463b6efe18760dada93bfa08
Parents: f9544fa
Author: Karthik Kambatla 
Authored: Tue Apr 4 17:08:26 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:21 2017 -0700

--
 .../server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb8a9250/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
index 8dccf6e..d84f0cf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
@@ -55,7 +55,6 @@ public class TestSchedulingPolicy {
 conf = new FairSchedulerConfiguration();
   }
 
-  @Test(timeout = 1000)
   public void testParseSchedulingPolicy()
   throws AllocationConfigurationException {
 





[49/50] [abbrv] hadoop git commit: MAPREDUCE-6201. TestNetworkedJob fails on trunk (pbacsko via rkanter)

2017-04-17 Thread inigoiri
MAPREDUCE-6201. TestNetworkedJob fails on trunk (pbacsko via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83b1d223
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83b1d223
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83b1d223

Branch: refs/heads/HDFS-10467
Commit: 83b1d223da9c0ac55c4d1c858c1fd393b494f665
Parents: b2e33d5
Author: Robert Kanter 
Authored: Thu Apr 6 17:44:47 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:24 2017 -0700

--
 .../src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83b1d223/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java
index 45c7d1f..65b9dbd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestNetworkedJob.java
@@ -381,6 +381,9 @@ public class TestNetworkedJob {
 // Expected queue names depending on Capacity Scheduler queue naming
 conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
 CapacityScheduler.class);
+// Default value is 90 - if you have low disk space,
+// testNetworkedJob will fail
+conf.set(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, "99");
 return MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
   }
 }





[26/50] [abbrv] hadoop git commit: HADOOP-14280. Fix compilation of TestKafkaMetrics.

2017-04-17 Thread inigoiri
HADOOP-14280. Fix compilation of TestKafkaMetrics.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26f5552f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26f5552f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26f5552f

Branch: refs/heads/HDFS-10467
Commit: 26f5552f0b6e8a01fd77942c641a9a1cc3be60ae
Parents: 04ef448
Author: Andrew Wang 
Authored: Wed Apr 5 15:12:03 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:22 2017 -0700

--
 hadoop-tools/hadoop-kafka/pom.xml   | 26 
 .../hadoop/metrics2/impl/TestKafkaMetrics.java  |  4 +--
 2 files changed, 2 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26f5552f/hadoop-tools/hadoop-kafka/pom.xml
--
diff --git a/hadoop-tools/hadoop-kafka/pom.xml 
b/hadoop-tools/hadoop-kafka/pom.xml
index 06c38f7..13e0ac0 100644
--- a/hadoop-tools/hadoop-kafka/pom.xml
+++ b/hadoop-tools/hadoop-kafka/pom.xml
@@ -36,32 +36,6 @@
 true
   
 
-  
-
-  tests-off
-  
-
-  src/test/resources/auth-keys.xml
-
-  
-  
-true
-  
-
-
-  tests-on
-  
-
-  src/test/resources/auth-keys.xml
-
-  
-  
-false
-  
-
-
-  
-
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26f5552f/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
--
diff --git 
a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
 
b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
index eff1afa..8479298 100644
--- 
a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
+++ 
b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.metrics2.impl;
 
-import com.google.common.base.Objects;
+import com.google.common.base.MoreObjects;
 import com.google.common.collect.Lists;
 import org.apache.commons.configuration2.SubsetConfiguration;
 import org.apache.hadoop.metrics2.AbstractMetric;
@@ -74,7 +74,7 @@ public class TestKafkaMetrics {
 
 @Override
 public String toString() {
-  return Objects.toStringHelper(this).add("name", name())
+  return MoreObjects.toStringHelper(this).add("name", name())
   .add("description", desc).toString();
 }
   }
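
For context: Guava deprecated Objects.toStringHelper and removed it in later releases, which is what broke compilation here. MoreObjects.toStringHelper is a drop-in replacement; a self-contained sketch (the class and fields are illustrative):

    import com.google.common.base.MoreObjects;

    class MetricInfo {
      private final String name;
      private final String desc;

      MetricInfo(String name, String desc) {
        this.name = name;
        this.desc = desc;
      }

      @Override
      public String toString() {
        return MoreObjects.toStringHelper(this)
            .add("name", name)
            .add("description", desc)
            .toString();
      }
    }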





[16/50] [abbrv] hadoop git commit: HDFS-11560. Expose slow disks via NameNode JMX. Contributed by Hanisha Koneru.

2017-04-17 Thread inigoiri
HDFS-11560. Expose slow disks via NameNode JMX. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3422a3dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3422a3dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3422a3dd

Branch: refs/heads/HDFS-10467
Commit: 3422a3ddf02695df5b2db196d04299d9a8f68a04
Parents: 7070895
Author: Hanisha Koneru 
Authored: Fri Mar 31 13:50:29 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:20 2017 -0700

--
 .../server/blockmanagement/DatanodeManager.java |  9 +++
 .../server/blockmanagement/SlowDiskTracker.java |  3 +
 .../hadoop/hdfs/server/namenode/NameNode.java   |  6 ++
 .../server/namenode/NameNodeStatusMXBean.java   |  8 +++
 .../blockmanagement/TestSlowDiskTracker.java| 13 +
 .../namenode/TestNameNodeStatusMXBean.java  | 59 +++-
 6 files changed, 85 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3422a3dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 18135a8..c7bdca9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1907,5 +1907,14 @@ public class DatanodeManager {
   public SlowDiskTracker getSlowDiskTracker() {
 return slowDiskTracker;
   }
+  /**
+   * Retrieve information about slow disks as a JSON string.
+   * Returns null if we are not tracking slow disks.
+   * @return slow disk report in JSON, or null when not tracking slow disks
+   */
+  public String getSlowDisksReport() {
+return slowDiskTracker != null ?
+slowDiskTracker.getSlowDiskReportAsJsonString() : null;
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3422a3dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
index 25920a2..52fce5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
@@ -256,6 +256,9 @@ public class SlowDiskTracker {
   public String getSlowDiskReportAsJsonString() {
 ObjectMapper objectMapper = new ObjectMapper();
 try {
+  if (slowDisksReport.isEmpty()) {
+return null;
+  }
   return objectMapper.writeValueAsString(slowDisksReport);
 } catch (JsonProcessingException e) {
   // Failed to serialize. Don't log the exception call stack.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3422a3dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index e7841f0..32d268a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1826,6 +1826,12 @@ public class NameNode extends ReconfigurableBase 
implements
 .getSlowPeersReport();
   }
 
+  @Override //NameNodeStatusMXBean
+  public String getSlowDisksReport() {
+return namesystem.getBlockManager().getDatanodeManager()
+.getSlowDisksReport();
+  }
+
   /**
* Shutdown the NN immediately in an ungraceful way. Used when it would be
* unsafe for the NN to continue operating, e.g. during a failed HA state

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3422a3dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeStatusMXBean.java
--
diff --git 
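
Taken together, the hunks above define a simple contract: the NameNode MXBean exposes the slow-disk report as a JSON string, and null means nothing is being tracked. A minimal sketch of that serialization contract using the same Jackson ObjectMapper API (the report's map type is illustrative):

    import java.util.Map;

    import com.fasterxml.jackson.core.JsonProcessingException;
    import com.fasterxml.jackson.databind.ObjectMapper;

    class SlowDiskReportSketch {
      /** Serialize the report, or return null when there is nothing to report. */
      static String toJson(Map<String, Double> slowDisks) {
        if (slowDisks == null || slowDisks.isEmpty()) {
          return null;
        }
        try {
          return new ObjectMapper().writeValueAsString(slowDisks);
        } catch (JsonProcessingException e) {
          return null; // serialization failed; callers treat this as "no report"
        }
      }
    }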

[14/50] [abbrv] hadoop git commit: HADOOP-14267. Make DistCpOptions immutable. Contributed by Mingliang Liu

2017-04-17 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1543f9c/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index af91347..8111b04 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -123,10 +123,10 @@ public class SimpleCopyListing extends CopyListing {
   }
 
   @Override
-  protected void validatePaths(DistCpOptions options)
+  protected void validatePaths(DistCpContext context)
   throws IOException, InvalidInputException {
 
-Path targetPath = options.getTargetPath();
+Path targetPath = context.getTargetPath();
 FileSystem targetFS = targetPath.getFileSystem(getConf());
 boolean targetExists = false;
 boolean targetIsFile = false;
@@ -142,12 +142,12 @@ public class SimpleCopyListing extends CopyListing {
 
 //If target is a file, then source has to be single file
 if (targetIsFile) {
-  if (options.getSourcePaths().size() > 1) {
+  if (context.getSourcePaths().size() > 1) {
 throw new InvalidInputException("Multiple source being copied to a 
file: " +
 targetPath);
   }
 
-  Path srcPath = options.getSourcePaths().get(0);
+  Path srcPath = context.getSourcePaths().get(0);
   FileSystem sourceFS = srcPath.getFileSystem(getConf());
   if (!sourceFS.isFile(srcPath)) {
 throw new InvalidInputException("Cannot copy " + srcPath +
@@ -155,12 +155,12 @@ public class SimpleCopyListing extends CopyListing {
   }
 }
 
-if (options.shouldAtomicCommit() && targetExists) {
+if (context.shouldAtomicCommit() && targetExists) {
   throw new InvalidInputException("Target path for atomic-commit already 
exists: " +
 targetPath + ". Cannot atomic-commit to pre-existing target-path.");
 }
 
-for (Path path: options.getSourcePaths()) {
+for (Path path: context.getSourcePaths()) {
   FileSystem fs = path.getFileSystem(getConf());
   if (!fs.exists(path)) {
 throw new InvalidInputException(path + " doesn't exist");
@@ -184,7 +184,7 @@ public class SimpleCopyListing extends CopyListing {
 }
 
 if (targetIsReservedRaw) {
-  options.preserveRawXattrs();
+  context.setPreserveRawXattrs(true);
   getConf().setBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, 
true);
 }
 
@@ -194,18 +194,19 @@ public class SimpleCopyListing extends CopyListing {
  */
 Credentials credentials = getCredentials();
 if (credentials != null) {
-  Path[] inputPaths = options.getSourcePaths().toArray(new Path[1]);
+  Path[] inputPaths = context.getSourcePaths()
+  .toArray(new Path[1]);
   TokenCache.obtainTokensForNamenodes(credentials, inputPaths, getConf());
 }
   }
 
   @Override
   protected void doBuildListing(Path pathToListingFile,
-DistCpOptions options) throws IOException {
-if(options.shouldUseSnapshotDiff()) {
-  doBuildListingWithSnapshotDiff(getWriter(pathToListingFile), options);
-}else {
-  doBuildListing(getWriter(pathToListingFile), options);
+DistCpContext context) throws IOException {
+if (context.shouldUseSnapshotDiff()) {
+  doBuildListingWithSnapshotDiff(getWriter(pathToListingFile), context);
+} else {
+  doBuildListing(getWriter(pathToListingFile), context);
 }
   }
 
@@ -232,22 +233,22 @@ public class SimpleCopyListing extends CopyListing {
* @throws IOException
*/
   private void addToFileListing(SequenceFile.Writer fileListWriter,
-  Path sourceRoot, Path path, DistCpOptions options) throws IOException {
+  Path sourceRoot, Path path, DistCpContext context) throws IOException {
 sourceRoot = getPathWithSchemeAndAuthority(sourceRoot);
 path = getPathWithSchemeAndAuthority(path);
 path = makeQualified(path);
 
 FileSystem sourceFS = sourceRoot.getFileSystem(getConf());
 FileStatus fileStatus = sourceFS.getFileStatus(path);
-final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
-final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
-final boolean preserveRawXAttrs = options.shouldPreserveRawXattrs();
+final boolean preserveAcls = context.shouldPreserve(FileAttribute.ACL);
+final boolean preserveXAttrs = context.shouldPreserve(FileAttribute.XATTR);
+final boolean preserveRawXAttrs = context.shouldPreserveRawXattrs();
 LinkedList fileCopyListingStatus =
 DistCpUtils.toCopyListingFileStatus(sourceFS, fileStatus,
 preserveAcls, preserveXAttrs, preserveRawXAttrs,
- 
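
The hunks above show the mechanical half of the change: read accessors move from DistCpOptions to a DistCpContext, and flags discovered at run time (such as preserve-raw-xattrs) become setters on the context instead of mutators on the options. A condensed sketch of that immutable-options/mutable-context split, with illustrative names rather than DistCp's exact classes:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    final class OptionsSketch {
      private final List<String> sourcePaths;

      OptionsSketch(List<String> sourcePaths) {
        // Built once and never mutated afterwards.
        this.sourcePaths =
            Collections.unmodifiableList(new ArrayList<>(sourcePaths));
      }

      List<String> getSourcePaths() { return sourcePaths; }
    }

    class ContextSketch {
      private final OptionsSketch options;
      private boolean preserveRawXattrs; // discovered during path validation

      ContextSketch(OptionsSketch options) { this.options = options; }

      List<String> getSourcePaths() { return options.getSourcePaths(); }
      void setPreserveRawXattrs(boolean v) { preserveRawXattrs = v; }
      boolean shouldPreserveRawXattrs() { return preserveRawXattrs; }
    }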

[33/50] [abbrv] hadoop git commit: YARN-6403. Invalid local resource request can raise NPE and make NM exit. Contributed by Tao Yang

2017-04-17 Thread inigoiri
YARN-6403. Invalid local resource request can raise NPE and make NM exit. 
Contributed by Tao Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93932094
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93932094
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93932094

Branch: refs/heads/HDFS-10467
Commit: 939320941ad870adacc26fe096b278cf63529312
Parents: ca33197
Author: Jason Lowe 
Authored: Wed Apr 5 12:30:45 2017 -0500
Committer: Inigo 
Committed: Thu Apr 6 18:58:22 2017 -0700

--
 .../impl/pb/ContainerLaunchContextPBImpl.java   | 13 ++
 .../TestApplicationClientProtocolRecords.java   | 29 +
 .../containermanager/ContainerManagerImpl.java  | 10 +
 .../TestContainerManagerWithLCE.java| 11 +
 .../containermanager/TestContainerManager.java  | 45 
 5 files changed, 108 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93932094/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
index 1f76c34..f07a9d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java
@@ -208,11 +208,24 @@ extends ContainerLaunchContext {
   final Map localResources) {
 if (localResources == null)
   return;
+checkLocalResources(localResources);
 initLocalResources();
 this.localResources.clear();
 this.localResources.putAll(localResources);
   }
   
+  private void checkLocalResources(Map localResources) {
+for (Map.Entry rsrcEntry : localResources
+.entrySet()) {
+  if (rsrcEntry.getValue() == null
+  || rsrcEntry.getValue().getResource() == null) {
+throw new NullPointerException(
+"Null resource URL for local resource " + rsrcEntry.getKey() + " : 
"
++ rsrcEntry.getValue());
+  }
+}
+  }
+
   private void addLocalResourcesToProto() {
 maybeInitBuilder();
 builder.clearLocalResources();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93932094/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationClientProtocolRecords.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationClientProtocolRecords.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationClientProtocolRecords.java
index 0294ad1..8773d11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationClientProtocolRecords.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/impl/pb/TestApplicationClientProtocolRecords.java
@@ -30,6 +30,10 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -66,4 +70,29 @@ public class TestApplicationClientProtocolRecords {
 clcProto.getEnvironment().get("testCLCPBImplNullEnv"));
 
   }
+
+  /*
+   * This test validates the scenario in which the client sets a null value for
+   * local resource URL.
+   */
+  @Test
+  public void testCLCPBImplNullResourceURL() throws IOException {
+RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+try {
+  LocalResource rsrc_alpha = 
recordFactory.newRecordInstance(LocalResource.class);
+  rsrc_alpha.setResource(null);
+  
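
The scenario the test builds is easy to reproduce end to end: a LocalResource whose URL was never set. With the new checkLocalResources validation, the failure surfaces in the client when setLocalResources is called, instead of as an NPE inside the NodeManager. A self-contained version of that scenario, using the same record-factory API as the test above:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.api.records.LocalResource;
    import org.apache.hadoop.yarn.factories.RecordFactory;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

    public class NullResourceUrlSketch {
      public static void main(String[] args) {
        RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
        ContainerLaunchContext clc =
            factory.newRecordInstance(ContainerLaunchContext.class);
        LocalResource jar = factory.newRecordInstance(LocalResource.class);
        // setResource(...) deliberately omitted, so the resource URL is null.
        Map<String, LocalResource> resources = new HashMap<>();
        resources.put("app.jar", jar);
        clc.setLocalResources(resources); // fails fast with the patch applied
      }
    }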

[31/50] [abbrv] hadoop git commit: HDFS-11538. Move ClientProtocol HA proxies into hadoop-hdfs-client. Contributed by Huafeng Wang.

2017-04-17 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58c01fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
deleted file mode 100644
index 2f6c9bc..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.ha;
-
-import java.lang.reflect.InvocationHandler;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.StandbyException;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.io.retry.MultiException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A FailoverProxyProvider implementation that technically does not "failover"
- * per-se. It constructs a wrapper proxy that sends the request to ALL
- * underlying proxies simultaneously. It assumes that in an HA setup, there will
- * be only one Active, and the active should respond faster than any configured
- * standbys. Once it receives a response from any one of the configured proxies,
- * outstanding requests to other proxies are immediately cancelled.
- */
-public class RequestHedgingProxyProvider extends
-ConfiguredFailoverProxyProvider {
-
-  public static final Logger LOG =
-  LoggerFactory.getLogger(RequestHedgingProxyProvider.class);
-
-  class RequestHedgingInvocationHandler implements InvocationHandler {
-
-final Map targetProxies;
-
-public RequestHedgingInvocationHandler(
-Map targetProxies) {
-  this.targetProxies = new HashMap<>(targetProxies);
-}
-
-/**
- * Creates a Executor and invokes all proxies concurrently. This
- * implementation assumes that Clients have configured proper socket
- * timeouts, else the call can block forever.
- *
- * @param proxy
- * @param method
- * @param args
- * @return
- * @throws Throwable
- */
-@Override
-public Object
-invoke(Object proxy, final Method method, final Object[] args)
-throws Throwable {
-  Map proxyMap = new HashMap<>();
-  int numAttempts = 0;
-
-  ExecutorService executor = null;
-  CompletionService completionService;
-  try {
-// Optimization : if only 2 proxies are configured and one had failed
-// over, then we dont need to create a threadpool etc.
-targetProxies.remove(toIgnore);
-if (targetProxies.size() == 1) {
-  ProxyInfo proxyInfo = targetProxies.values().iterator().next();
-  Object retVal = method.invoke(proxyInfo.proxy, args);
-  successfulProxy = proxyInfo;
-  return retVal;
-}
-executor = Executors.newFixedThreadPool(proxies.size());
-completionService = new ExecutorCompletionService<>(executor);
-for (final Map.Entry pEntry :
-targetProxies.entrySet()) {
-  Callable c = new Callable() {
-@Override
-public Object call() throws Exception {
-  LOG.trace("Invoking method {} on proxy {}", method,
-  pEntry.getValue().proxyInfo);
-  return method.invoke(pEntry.getValue().proxy, args);
-}
-  };
-  
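
The class is being moved to hadoop-hdfs-client, not removed, so the deletion above has a matching addition elsewhere in the commit. The hedging idea itself is compact enough to sketch generically with the same ExecutorCompletionService machinery; this is an illustration of the concept, not the provider's actual API:

    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class HedgingSketch {
      // Invoke every candidate concurrently; the first success wins and the
      // slower outstanding calls are cancelled on shutdown.
      static <T> T hedge(List<Callable<T>> candidates) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(candidates.size());
        try {
          CompletionService<T> cs = new ExecutorCompletionService<>(pool);
          for (Callable<T> c : candidates) {
            cs.submit(c);
          }
          Exception last = null;
          for (int i = 0; i < candidates.size(); i++) {
            try {
              return cs.take().get();
            } catch (ExecutionException e) {
              last = e; // e.g. a standby rejected the call; keep waiting
            }
          }
          throw last;
        } finally {
          pool.shutdownNow();
        }
      }
    }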

[36/50] [abbrv] hadoop git commit: HDFS-11628. Clarify the behavior of HDFS Mover in documentation. Contributed by Xiaobing Zhou.

2017-04-17 Thread inigoiri
HDFS-11628. Clarify the behavior of HDFS Mover in documentation. Contributed by 
Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b8607d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b8607d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b8607d0

Branch: refs/heads/HDFS-10467
Commit: 6b8607d03fe78993743fabc7ea920bf0935500c9
Parents: 26f5552
Author: Xiaobing Zhou 
Authored: Wed Apr 5 15:33:08 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b8607d0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 56a2ab8..91ad107 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -98,7 +98,7 @@ The effective storage policy can be retrieved by the 
"[`storagepolicies -getStor
 Mover - A New Data Migration Tool
 -
 
-A new data migration tool is added for archiving data. The tool is similar to 
Balancer. It periodically scans the files in HDFS to check if the block 
placement satisfies the storage policy. For the blocks violating the storage 
policy, it moves the replicas to a different storage type in order to fulfill 
the storage policy requirement.
+A new data migration tool is added for archiving data. The tool is similar to 
Balancer. It periodically scans the files in HDFS to check if the block 
placement satisfies the storage policy. For the blocks violating the storage 
policy, it moves the replicas to a different storage type in order to fulfill 
the storage policy requirement. Note that it always tries to move block 
replicas within the same node whenever possible. If that is not possible (e.g. 
when a node doesn't have the target storage type) then it will copy the block 
replicas to another node over the network.
 
 * Command:
 





[47/50] [abbrv] hadoop git commit: YARN-5797. Add metrics to the node manager for cleaning the PUBLIC and PRIVATE caches. (Chris Trezzo via mingma)

2017-04-17 Thread inigoiri
YARN-5797. Add metrics to the node manager for cleaning the PUBLIC and PRIVATE 
caches. (Chris Trezzo via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66b88300
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66b88300
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66b88300

Branch: refs/heads/HDFS-10467
Commit: 66b88300540d2147216a5f8b5e63ed3892625440
Parents: 60c3eda
Author: Ming Ma 
Authored: Thu Apr 6 16:54:43 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  |  8 ++--
 .../localizer/ResourceLocalizationService.java  | 13 ++-
 .../nodemanager/metrics/NodeManagerMetrics.java | 41 
 .../nodemanager/DummyContainerManager.java  |  5 ++-
 .../TestContainerManagerRecovery.java   | 11 --
 .../localizer/TestLocalCacheCleanup.java| 17 +++-
 .../TestLocalCacheDirectoryManager.java |  8 +++-
 .../TestResourceLocalizationService.java| 36 +
 8 files changed, 111 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b88300/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 85dc5fc..d82c728 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -230,7 +230,8 @@ public class ContainerManagerImpl extends CompositeService 
implements
 this.metrics = metrics;
 
 rsrcLocalizationSrvc =
-createResourceLocalizationService(exec, deletionContext, context);
+createResourceLocalizationService(exec, deletionContext, context,
+metrics);
 addService(rsrcLocalizationSrvc);
 
 containersLauncher = createContainersLauncher(context, exec);
@@ -477,9 +478,10 @@ public class ContainerManagerImpl extends CompositeService 
implements
   }
 
   protected ResourceLocalizationService createResourceLocalizationService(
-  ContainerExecutor exec, DeletionService deletionContext, Context 
context) {
+  ContainerExecutor exec, DeletionService deletionContext,
+  Context nmContext, NodeManagerMetrics nmMetrics) {
 return new ResourceLocalizationService(this.dispatcher, exec,
-deletionContext, dirsHandler, context);
+deletionContext, dirsHandler, nmContext, nmMetrics);
   }
 
   protected SharedCacheUploadService createSharedCacheUploaderService() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b88300/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 37473e3..2208f8f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -131,6 +131,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.even
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier;
 import 
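
The diff stats above tell the story even where the text is cut off: ResourceLocalizationService now receives a NodeManagerMetrics instance so cache-cleanup work can be counted. For readers unfamiliar with the metrics2 library, a minimal sketch of how such counters are declared and registered; the metric names here are illustrative, not the ones this patch adds:

    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;
    import org.apache.hadoop.metrics2.lib.MutableGaugeLong;

    @Metrics(about = "Localizer cache cleanup sketch", context = "yarn")
    public class CacheCleanupMetricsSketch {
      @Metric("Total bytes deleted from the local cache")
      MutableCounterLong totalBytesDeleted;
      @Metric("Current size of the public cache in bytes")
      MutableGaugeLong publicCacheSize;

      public static CacheCleanupMetricsSketch create() {
        // Registration injects the @Metric fields.
        return DefaultMetricsSystem.instance().register(
            "CacheCleanupSketch", null, new CacheCleanupMetricsSketch());
      }

      public void cacheCleaned(long deletedBytes, long newCacheSize) {
        totalBytesDeleted.incr(deletedBytes);
        publicCacheSize.set(newCacheSize);
      }
    }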

[46/50] [abbrv] hadoop git commit: YARN-6448. Continuous scheduling thread crashes while sorting nodes. (Yufei Gu via kasha)

2017-04-17 Thread inigoiri
YARN-6448. Continuous scheduling thread crashes while sorting nodes. (Yufei Gu 
via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e054888d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e054888d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e054888d

Branch: refs/heads/HDFS-10467
Commit: e054888da14060bf1fdf5be1808f154dd523dc66
Parents: 6b8607d
Author: Karthik Kambatla 
Authored: Wed Apr 5 15:42:55 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../scheduler/SchedulerNode.java|  4 ++-
 .../scheduler/fair/FairScheduler.java   |  8 +++--
 .../fair/TestContinuousScheduling.java  | 36 
 3 files changed, 45 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e054888d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index db17b42..af4a001 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -286,7 +287,8 @@ public abstract class SchedulerNode {
* container.
* @param resource Resources to deduct.
*/
-  private synchronized void deductUnallocatedResource(Resource resource) {
+  @VisibleForTesting
+  public synchronized void deductUnallocatedResource(Resource resource) {
 if (resource == null) {
   LOG.error("Invalid deduction of null resource for "
   + rmNode.getNodeAddress());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e054888d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index f3fde76..98c14ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -913,8 +913,12 @@ public class FairScheduler extends
 
   void continuousSchedulingAttempt() throws InterruptedException {
 long start = getClock().getTime();
-List nodeIdList =
-nodeTracker.sortedNodeList(nodeAvailableResourceComparator);
+List nodeIdList;
+// Hold a lock to prevent comparator order changes due to changes of node
+// unallocated resources
+synchronized (this) {
+  nodeIdList = nodeTracker.sortedNodeList(nodeAvailableResourceComparator);
+}
 
 // iterate all nodes
 for (FSSchedulerNode node : nodeIdList) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e054888d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
--
diff --git 
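
The crash is the classic "Comparison method violates its general contract!" failure: node resources mutate while TimSort is comparing them. The patch holds the scheduler lock while building the sorted node list; another common remedy, shown below purely as an illustration, is to sort against an immutable snapshot of the values:

    import java.util.Comparator;
    import java.util.IdentityHashMap;
    import java.util.List;
    import java.util.Map;

    public class StableSortSketch {
      static final class Node {
        volatile long unallocatedMb; // mutated concurrently by scheduling
        Node(long mb) { unallocatedMb = mb; }
      }

      static void sortSafely(List<Node> nodes) {
        // Read each node's value exactly once, then sort on the snapshot so
        // the comparator sees a consistent ordering throughout the sort.
        Map<Node, Long> snapshot = new IdentityHashMap<>();
        for (Node n : nodes) {
          snapshot.put(n, n.unallocatedMb);
        }
        nodes.sort(Comparator.comparingLong(snapshot::get));
      }
    }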

[50/50] [abbrv] hadoop git commit: HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri.

2017-04-17 Thread inigoiri
HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and 
Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e4661f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e4661f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e4661f7

Branch: refs/heads/HDFS-10467
Commit: 0e4661f7fe6b29b13c8c2d74ee70e8d6dd7ab3de
Parents: 83b1d22
Author: Inigo 
Authored: Thu Apr 6 19:18:52 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 19:18:52 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  11 ++
 .../server/federation/store/RecordStore.java| 100 
 .../store/driver/StateStoreSerializer.java  | 119 +++
 .../driver/impl/StateStoreSerializerPBImpl.java | 115 ++
 .../store/records/impl/pb/PBRecord.java |  47 
 .../store/records/impl/pb/package-info.java |  29 +
 .../src/main/resources/hdfs-default.xml |   8 ++
 7 files changed, 429 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4661f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 58a2823..07c2adf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1097,6 +1098,16 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT 
=
   "org.apache.hadoop.hdfs.server.federation.MockResolver";
 
+  // HDFS Router-based federation State Store
+  public static final String FEDERATION_STORE_PREFIX =
+  FEDERATION_ROUTER_PREFIX + "store.";
+
+  public static final String FEDERATION_STORE_SERIALIZER_CLASS =
+  DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer";
+  public static final Class
+  FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
+  StateStoreSerializerPBImpl.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4661f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
new file mode 100644
index 000..524f432
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.lang.reflect.Constructor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import 
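
The DFSConfigKeys hunk above shows a standard Hadoop idiom: a class-valued configuration key paired with a compiled-in default implementation. A short sketch of how such a key is typically resolved and instantiated; the helper itself is illustrative, not part of this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;

    public class SerializerLoaderSketch {
      static <T> T newInstance(Configuration conf, String key,
          Class<? extends T> defaultImpl, Class<T> xface) {
        // Resolve the configured class (falling back to the default), then
        // instantiate it reflectively with the configuration applied.
        Class<? extends T> clazz = conf.getClass(key, defaultImpl, xface);
        return ReflectionUtils.newInstance(clazz, conf);
      }
    }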

[43/50] [abbrv] hadoop git commit: MAPREDUCE-6846. Fragments specified for libjar paths are not handled correctly (Contributed by Chris Trezzo via Daniel Templeton)

2017-04-17 Thread inigoiri
MAPREDUCE-6846. Fragments specified for libjar paths are not handled correctly
(Contributed by Chris Trezzo via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4db939da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4db939da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4db939da

Branch: refs/heads/HDFS-10467
Commit: 4db939daf5e052e6a9ff604a8e7150b89176faad
Parents: 8bbd335
Author: Daniel Templeton 
Authored: Wed Apr 5 17:24:09 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../hadoop/mapreduce/JobResourceUploader.java   | 103 +++-
 .../mapreduce/TestJobResourceUploader.java  | 481 ---
 2 files changed, 492 insertions(+), 92 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db939da/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index 085c966..f1cad57 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -23,6 +23,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -91,7 +92,7 @@ class JobResourceUploader {
 submitJobDir = new Path(submitJobDir.toUri().getPath());
 FsPermission mapredSysPerms =
 new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
-FileSystem.mkdirs(jtFs, submitJobDir, mapredSysPerms);
+mkdirs(jtFs, submitJobDir, mapredSysPerms);
 
 Collection files = conf.getStringCollection("tmpfiles");
 Collection libjars = conf.getStringCollection("tmpjars");
@@ -116,18 +117,20 @@ class JobResourceUploader {
 job.getCredentials());
   }
 
-  private void uploadFiles(Configuration conf, Collection files,
+  @VisibleForTesting
+  void uploadFiles(Configuration conf, Collection files,
   Path submitJobDir, FsPermission mapredSysPerms, short submitReplication)
   throws IOException {
 Path filesDir = JobSubmissionFiles.getJobDistCacheFiles(submitJobDir);
 if (!files.isEmpty()) {
-  FileSystem.mkdirs(jtFs, filesDir, mapredSysPerms);
+  mkdirs(jtFs, filesDir, mapredSysPerms);
   for (String tmpFile : files) {
 URI tmpURI = null;
 try {
   tmpURI = new URI(tmpFile);
 } catch (URISyntaxException e) {
-  throw new IllegalArgumentException(e);
+  throw new IllegalArgumentException("Error parsing files argument."
+  + " Argument must be a valid URI: " + tmpFile, e);
 }
 Path tmp = new Path(tmpURI);
 Path newPath = copyRemoteFiles(filesDir, tmp, conf, submitReplication);
@@ -136,50 +139,83 @@ class JobResourceUploader {
   DistributedCache.addCacheFile(pathURI, conf);
 } catch (URISyntaxException ue) {
   // should not throw a uri exception
-  throw new IOException("Failed to create uri for " + tmpFile, ue);
+  throw new IOException(
+  "Failed to create a URI (URISyntaxException) for the remote path 
"
+  + newPath + ". This was based on the files parameter: "
+  + tmpFile,
+  ue);
 }
   }
 }
   }
 
-  private void uploadLibJars(Configuration conf, Collection libjars,
+  // Suppress warning for use of DistributedCache (it is everywhere).
+  @SuppressWarnings("deprecation")
+  @VisibleForTesting
+  void uploadLibJars(Configuration conf, Collection libjars,
   Path submitJobDir, FsPermission mapredSysPerms, short submitReplication)
   throws IOException {
 Path libjarsDir = JobSubmissionFiles.getJobDistCacheLibjars(submitJobDir);
 if (!libjars.isEmpty()) {
-  FileSystem.mkdirs(jtFs, libjarsDir, mapredSysPerms);
+  mkdirs(jtFs, libjarsDir, mapredSysPerms);
+  Collection libjarURIs = new LinkedList<>();
+  boolean foundFragment = false;
   for (String tmpjars : libjars) {
-Path tmp = new Path(tmpjars);
+URI tmpURI = null;
+try {
+  tmpURI = new 
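
The fragment on a libjar URI is what lets users rename a jar in the task's working directory, and it is exactly what the old Path-based parsing lost. A self-contained illustration with java.net.URI (the example URI is made up):

    import java.net.URI;
    import java.net.URISyntaxException;

    public class LibjarFragmentSketch {
      public static void main(String[] args) throws URISyntaxException {
        URI u = new URI("hdfs://nn:8020/libs/dep-1.0.jar#dep.jar");
        System.out.println(u.getPath());     // /libs/dep-1.0.jar
        System.out.println(u.getFragment()); // dep.jar, the desired link name
      }
    }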

[37/50] [abbrv] hadoop git commit: YARN-6424. TimelineCollector is not stopped when an app finishes in RM. Contributed by Varun Saxena.

2017-04-17 Thread inigoiri
YARN-6424. TimelineCollector is not stopped when an app finishes in RM. 
Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52c0ac5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52c0ac5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52c0ac5a

Branch: refs/heads/HDFS-10467
Commit: 52c0ac5a150090cd1df7892669b7797b40d03b0a
Parents: 6ae4d41
Author: Rohith Sharma K S 
Authored: Thu Apr 6 10:15:22 2017 +0530
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../metrics/TimelineServiceV2Publisher.java  | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52c0ac5a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 412d573..a8bf6bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -179,8 +179,9 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
 getTimelinelineAppMetrics(appMetrics, finishedTime);
 entity.setMetrics(entityMetrics);
 
-getDispatcher().getEventHandler().handle(new TimelineV2PublishEvent(
-SystemMetricsEventType.PUBLISH_ENTITY, entity, 
app.getApplicationId()));
+getDispatcher().getEventHandler().handle(
+new ApplicationFinishPublishEvent(SystemMetricsEventType.
+PUBLISH_APPLICATION_FINISHED_ENTITY, entity, app));
   }
 
   private Set getTimelinelineAppMetrics(
@@ -452,16 +453,16 @@ public class TimelineServiceV2Publisher extends 
AbstractSystemMetricsPublisher {
   }
 
   private class ApplicationFinishPublishEvent extends TimelineV2PublishEvent {
-private RMAppImpl app;
+private RMApp app;
 
 public ApplicationFinishPublishEvent(SystemMetricsEventType type,
-TimelineEntity entity, RMAppImpl app) {
+TimelineEntity entity, RMApp app) {
   super(type, entity, app.getApplicationId());
   this.app = app;
 }
 
 public RMAppImpl getRMAppImpl() {
-  return app;
+  return (RMAppImpl) app;
 }
   }
 





[35/50] [abbrv] hadoop git commit: YARN-6406. Remove SchedulerRequestKeys when no more pending ResourceRequest. (Arun Suresh via wangda)

2017-04-17 Thread inigoiri
YARN-6406. Remove SchedulerRequestKeys when no more pending ResourceRequest. 
(Arun Suresh via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55285272
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55285272
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55285272

Branch: refs/heads/HDFS-10467
Commit: 55285272c6d3ac4b24748138eed442560f33717a
Parents: 9393209
Author: Wangda Tan 
Authored: Tue Apr 4 14:43:58 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:22 2017 -0700

--
 .../scheduler/AppSchedulingInfo.java|  47 ++
 .../LocalitySchedulingPlacementSet.java |   8 +-
 .../capacity/TestCapacityScheduler.java | 159 +++
 .../scheduler/capacity/TestLeafQueue.java   |   8 +-
 .../webapp/TestRMWebServicesApps.java   | 123 ++
 5 files changed, 266 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55285272/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index bff9c41..4de5eac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -25,12 +25,8 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
@@ -51,9 +47,8 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -86,8 +81,8 @@ public class AppSchedulingInfo {
 
  private Set<String> requestedPartitions = new HashSet<>();
 
-  private final ConcurrentSkipListMap<SchedulerRequestKey, Integer>
-  schedulerKeys = new ConcurrentSkipListMap<>();
+  private final ConcurrentSkipListSet<SchedulerRequestKey>
+  schedulerKeys = new ConcurrentSkipListSet<>();
  final Map<SchedulerRequestKey, SchedulingPlacementSet<SchedulerNode>>
  schedulerKeyToPlacementSets = new ConcurrentHashMap<>();
 
@@ -156,29 +151,6 @@ public class AppSchedulingInfo {
 LOG.info("Application " + applicationId + " requests cleared");
   }
 
-
-  private void incrementSchedulerKeyReference(
-  SchedulerRequestKey schedulerKey) {
-Integer schedulerKeyCount = schedulerKeys.get(schedulerKey);
-if (schedulerKeyCount == null) {
-  schedulerKeys.put(schedulerKey, 1);
-} else {
-  schedulerKeys.put(schedulerKey, schedulerKeyCount + 1);
-}
-  }
-
-  public void decrementSchedulerKeyReference(
-  SchedulerRequestKey schedulerKey) {
-Integer schedulerKeyCount = schedulerKeys.get(schedulerKey);
-if (schedulerKeyCount != null) {
-  if (schedulerKeyCount > 1) {
-schedulerKeys.put(schedulerKey, schedulerKeyCount - 1);
-  } else {
-schedulerKeys.remove(schedulerKey);
-  }
-}
-  }
-
   public ContainerUpdateContext getUpdateContext() {
 return updateContext;
   }
@@ -230,6 +202,10 @@ public class AppSchedulingInfo {
 }
   }
 
+  public void 
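
Editor's note: the removed helpers above kept a reference count per SchedulerRequestKey in a ConcurrentSkipListMap; the patch replaces that bookkeeping with a ConcurrentSkipListSet whose membership tracks whether any request is still pending. A hedged sketch of the two styles, with plain String keys standing in for SchedulerRequestKey:

    import java.util.concurrent.ConcurrentSkipListMap;
    import java.util.concurrent.ConcurrentSkipListSet;

    // Before: reference counting; the key disappears when its count reaches zero.
    class CountedKeys {
      private final ConcurrentSkipListMap<String, Integer> keys =
          new ConcurrentSkipListMap<>();

      void inc(String k) {
        keys.merge(k, 1, Integer::sum);
      }

      void dec(String k) {
        // Returning null from the remapping function removes the entry.
        keys.computeIfPresent(k, (key, c) -> c > 1 ? c - 1 : null);
      }
    }

    // After: plain membership; add on the first request, remove when none remain.
    class PresentKeys {
      private final ConcurrentSkipListSet<String> keys = new ConcurrentSkipListSet<>();

      void onRequestAdded(String k) { keys.add(k); }
      void onNoMorePending(String k) { keys.remove(k); }
    }

The set form cannot drift out of sync with the actual pending requests, which is the lingering-key problem the JIRA title describes.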

[32/50] [abbrv] hadoop git commit: HDFS-11538. Move ClientProtocol HA proxies into hadoop-hdfs-client. Contributed by Huafeng Wang.

2017-04-17 Thread inigoiri
HDFS-11538. Move ClientProtocol HA proxies into hadoop-hdfs-client. Contributed 
by Huafeng Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c58c01fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c58c01fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c58c01fe

Branch: refs/heads/HDFS-10467
Commit: c58c01fe043c0cf7182eccef1b7205adfa091201
Parents: 11406a4
Author: Andrew Wang 
Authored: Tue Apr 4 23:05:24 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:22 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  13 +
 .../org/apache/hadoop/hdfs/HAUtilClient.java|  55 +++
 .../hadoop/hdfs/NameNodeProxiesClient.java  |  15 +-
 .../hdfs/client/HdfsClientConfigKeys.java   |   1 +
 .../namenode/ha/ClientHAProxyFactory.java   |  44 ++
 .../ha/ConfiguredFailoverProxyProvider.java | 183 +++
 .../hdfs/server/namenode/ha/HAProxyFactory.java |  44 ++
 .../namenode/ha/IPFailoverProxyProvider.java| 126 +
 .../ha/RequestHedgingProxyProvider.java | 234 +
 .../ha/TestRequestHedgingProxyProvider.java | 476 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  15 +-
 .../java/org/apache/hadoop/hdfs/HAUtil.java |  57 +--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |   3 +-
 .../hadoop/hdfs/server/namenode/DfsServlet.java |  29 +-
 .../ha/ConfiguredFailoverProxyProvider.java | 216 -
 .../namenode/ha/IPFailoverProxyProvider.java| 132 -
 .../namenode/ha/NameNodeHAProxyFactory.java |  45 ++
 .../ha/RequestHedgingProxyProvider.java | 241 --
 .../hadoop/hdfs/TestDFSClientFailover.java  |   4 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java |   2 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |   4 +-
 .../ha/TestRequestHedgingProxyProvider.java | 470 --
 23 files changed, 1247 insertions(+), 1165 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58c01fe/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index f9b2e8d..2e770cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -170,6 +170,19 @@ public class DFSUtilClient {
   }
 
   /**
+   * Returns list of InetSocketAddress corresponding to HA NN RPC addresses 
from
+   * the configuration.
+   *
+   * @param conf configuration
+   * @return list of InetSocketAddresses
+   */
+  public static Map<String, Map<String, InetSocketAddress>> 
getHaNnRpcAddresses(
+  Configuration conf) {
+return DFSUtilClient.getAddresses(conf, null,
+  HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+
+  /**
* Returns list of InetSocketAddress corresponding to HA NN HTTP addresses 
from
* the configuration.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58c01fe/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
index 9f28cfc..47288f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
@@ -20,15 +20,29 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.Collection;
 
 import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
+import static 
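
Editor's note: the new getHaNnRpcAddresses above returns a nested map keyed first by nameservice id and then by namenode id. A hedged usage sketch, assuming a Configuration that already carries the HA nameservice settings; nothing is used here beyond the method shown in the diff:

    import java.net.InetSocketAddress;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtilClient;

    public class ListHaNameNodes {
      public static void main(String[] args) {
        // Assumed to contain the HA nameservice and namenode RPC settings.
        Configuration conf = new Configuration();
        // nameservice id -> (namenode id -> RPC address)
        Map<String, Map<String, InetSocketAddress>> addrs =
            DFSUtilClient.getHaNnRpcAddresses(conf);
        addrs.forEach((nameservice, nns) ->
            nns.forEach((nnId, addr) ->
                System.out.println(nameservice + "/" + nnId + " -> " + addr)));
      }
    }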

[42/50] [abbrv] hadoop git commit: YARN-6381. FSAppAttempt has several variables that should be final (Contributed by Ameet Zaveri via Daniel Templeton)

2017-04-17 Thread inigoiri
YARN-6381. FSAppAttempt has several variables that should be final
(Contributed by Ameet Zaveri via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bbd3357
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bbd3357
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bbd3357

Branch: refs/heads/HDFS-10467
Commit: 8bbd335710476efd1d49c955dbad5495a7d62e00
Parents: 546d533
Author: Daniel Templeton 
Authored: Wed Apr 5 16:06:00 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../resourcemanager/scheduler/fair/FSAppAttempt.java| 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bbd3357/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index ccfcffb..e0dfb73 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -74,11 +74,11 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   private static final DefaultResourceCalculator RESOURCE_CALCULATOR
   = new DefaultResourceCalculator();
 
-  private long startTime;
-  private Priority appPriority;
-  private ResourceWeights resourceWeights;
+  private final long startTime;
+  private final Priority appPriority;
+  private final ResourceWeights resourceWeights;
   private Resource demand = Resources.createResource(0);
-  private FairScheduler scheduler;
+  private final FairScheduler scheduler;
   private FSQueue fsQueue;
   private Resource fairShare = Resources.createResource(0, 0);
 
@@ -96,9 +96,9 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
 
   // Used to record node reservation by an app.
   // Key = RackName, Value = Set of Nodes reserved by app on rack
-  private Map<String, Set<String>> reservations = new HashMap<>();
+  private final Map<String, Set<String>> reservations = new HashMap<>();
 
-  private List<FSSchedulerNode> blacklistNodeIds = new ArrayList<>();
+  private final List<FSSchedulerNode> blacklistNodeIds = new ArrayList<>();
   /**
* Delay scheduling: We often want to prioritize scheduling of node-local
* containers over rack-local or off-switch containers. To achieve this
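
Editor's note: making these fields final is enforceable documentation: each must be assigned exactly once, at declaration or in every constructor, and can never be reassigned afterwards. A minimal hedged sketch of the idiom, with illustrative field names rather than the real FSAppAttempt ones:

    class Attempt {
      private final long startTime;
      private final String queueName;

      Attempt(String queueName) {
        this.startTime = System.currentTimeMillis();
        this.queueName = queueName;
      }

      // this.queueName = "other"; // would not compile outside the constructor
    }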


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] [abbrv] hadoop git commit: HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by Akira Ajisaka

2017-04-17 Thread inigoiri
HADOOP-14268. Fix markdown itemization in hadoop-aws documents. Contributed by 
Akira Ajisaka


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aeb249ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aeb249ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aeb249ea

Branch: refs/heads/HDFS-10467
Commit: aeb249eaefe45e941beb29231510b1676846d218
Parents: b4d6577
Author: Mingliang Liu 
Authored: Mon Apr 3 11:07:14 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:21 2017 -0700

--
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md | 2 ++
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md   | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aeb249ea/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 82c3588..18c0ceb 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -41,6 +41,7 @@ The specifics of using these filesystems are documented in 
this section.
 
 
 See also:
+
 * [Testing](testing.html)
 * [Troubleshooting S3a](troubleshooting_s3a.html)
 
@@ -99,6 +100,7 @@ access to the data. Anyone with the credentials can not only 
read your datasets
 —they can delete them.
 
 Do not inadvertently share these credentials through means such as
+
 1. Checking in to SCM any configuration files containing the secrets.
 1. Logging them to a console, as they invariably end up being seen.
 1. Defining filesystem URIs with the credentials in the URL, such as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aeb249ea/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index 79551a3..39ca8f4 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -643,6 +643,7 @@ located.
 
 New tests are always welcome. Bear in mind that we need to keep costs
 and test time down, which is done by
+
 * Not duplicating tests.
 * Being efficient in your use of Hadoop API calls.
 * Isolating large/slow tests into the "scale" test group.
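
Editor's note: both hunks make the same fix, a blank line between the introductory sentence and the list that follows it. Without that separator, common Markdown renderers, such as the one used to build the Hadoop site docs, attach the list lines to the preceding paragraph instead of rendering them as bullet or numbered items.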


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: HDFS-11302. Improve Logging for SSLHostnameVerifier. Contributed by Chen Liang.

2017-04-17 Thread inigoiri
HDFS-11302. Improve Logging for SSLHostnameVerifier. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8d602e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8d602e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8d602e1

Branch: refs/heads/HDFS-10467
Commit: a8d602e1089e8a44f557c24217264876e397078f
Parents: 4db939d
Author: Xiaoyu Yao 
Authored: Wed Apr 5 17:25:18 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../security/ssl/SSLHostnameVerifier.java   | 21 +++-
 1 file changed, 20 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8d602e1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
index b5ef2b2..27e4920 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java
@@ -53,6 +53,8 @@ import javax.net.ssl.SSLSocket;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  
@@ -229,6 +231,12 @@ public interface SSLHostnameVerifier extends 
javax.net.ssl.HostnameVerifier {
 abstract class AbstractVerifier implements SSLHostnameVerifier {
 
 /**
+ * Writes as SSLFactory logs as it is the only consumer of this 
verifier
+ * class.
+ */
+static final Logger LOG = LoggerFactory.getLogger(SSLFactory.class);
+
+/**
  * This contains a list of 2nd-level domains that aren't allowed to
  * have wildcards when combined with country-codes.
  * For example: [*.co.uk].
@@ -354,13 +362,24 @@ public interface SSLHostnameVerifier extends 
javax.net.ssl.HostnameVerifier {
 throws SSLException {
 String[] cns = Certificates.getCNs(cert);
 String[] subjectAlts = Certificates.getDNSSubjectAlts(cert);
-check(host, cns, subjectAlts);
+try {
+check(host, cns, subjectAlts);
+} catch (SSLException e) {
+LOG.error("Host check error {}", e);
+throw e;
+}
 }
 
 public void check(final String[] hosts, final String[] cns,
   final String[] subjectAlts, final boolean ie6,
   final boolean strictWithSubDomains)
 throws SSLException {
+if (LOG.isTraceEnabled()) {
+LOG.trace("Hosts:{}, CNs:{} subjectAlts:{}, ie6:{}, " +
+"strictWithSubDomains{}", Arrays.toString(hosts),
+Arrays.toString(cns), Arrays.toString(subjectAlts), ie6,
+strictWithSubDomains);
+}
 // Build up lists of allowed hosts For logging/debugging purposes.
 StringBuffer buf = new StringBuffer(32);
 buf.append('<');
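
Editor's note: two idioms in the hunk above are worth calling out. The trace message is built only behind an isTraceEnabled() guard, because the Arrays.toString calls allocate on every invocation; and the failed check is logged and then rethrown so the caller still sees the SSLException. A hedged sketch of both outside the verifier, where doCheck is a hypothetical stand-in for the real certificate check:

    import java.util.Arrays;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class GuardedCheck {
      private static final Logger LOG = LoggerFactory.getLogger(GuardedCheck.class);

      void check(String host, String[] cns) throws Exception {
        if (LOG.isTraceEnabled()) {
          // The guard skips the Arrays.toString allocation when tracing is off.
          LOG.trace("host:{}, cns:{}", host, Arrays.toString(cns));
        }
        try {
          doCheck(host, cns);
        } catch (Exception e) {
          // Passing the exception as the final argument logs its stack trace.
          LOG.error("Host check error for {}", host, e);
          throw e; // log-and-rethrow: record context here, let the caller handle it
        }
      }

      private void doCheck(String host, String[] cns) throws Exception {
        // stand-in for the real CN/subjectAlt matching
      }
    }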


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] [abbrv] hadoop git commit: YARN-6004. Refactor TestResourceLocalizationService#testDownloadingResourcesOnContainer so that it is less than 150 lines. (Chris Trezzo via mingma)

2017-04-17 Thread inigoiri
YARN-6004. Refactor 
TestResourceLocalizationService#testDownloadingResourcesOnContainer so that it 
is less than 150 lines. (Chris Trezzo via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf4ed74f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf4ed74f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf4ed74f

Branch: refs/heads/HDFS-10467
Commit: bf4ed74f2f361015bac26fa874db18b09a2d1000
Parents: bb8a925
Author: Ming Ma 
Authored: Tue Apr 4 17:56:21 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:21 2017 -0700

--
 .../TestResourceLocalizationService.java| 376 +++
 1 file changed, 212 insertions(+), 164 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf4ed74f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 411..932e94f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -1124,7 +1124,6 @@ public class TestResourceLocalizationService {
   }
 
   @Test(timeout = 2)
-  @SuppressWarnings("unchecked")
   public void testDownloadingResourcesOnContainerKill() throws Exception {
 List<Path> localDirs = new ArrayList<Path>();
 String[] sDirs = new String[1];
@@ -1132,13 +1131,6 @@ public class TestResourceLocalizationService {
 sDirs[0] = localDirs.get(0).toString();
 
 conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
-DrainDispatcher dispatcher = new DrainDispatcher();
-dispatcher.init(conf);
-dispatcher.start();
-EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
-dispatcher.register(ApplicationEventType.class, applicationBus);
-EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
-dispatcher.register(ContainerEventType.class, containerBus);
 
 DummyExecutor exec = new DummyExecutor();
 LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
@@ -1149,6 +1141,7 @@ public class TestResourceLocalizationService {
 delService.init(new Configuration());
 delService.start();
 
+DrainDispatcher dispatcher = getDispatcher(conf);
 ResourceLocalizationService rawService = new ResourceLocalizationService(
 dispatcher, exec, delService, dirsHandler, nmContext);
 ResourceLocalizationService spyService = spy(rawService);
@@ -1191,180 +1184,235 @@ public class TestResourceLocalizationService {
   spyService.init(conf);
   spyService.start();
 
-  final Application app = mock(Application.class);
-  final ApplicationId appId =
-  BuilderUtils.newApplicationId(314159265358979L, 3);
-  String user = "user0";
-  when(app.getUser()).thenReturn(user);
-  when(app.getAppId()).thenReturn(appId);
-  spyService.handle(new ApplicationLocalizationEvent(
-  LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
-  ArgumentMatcher<ApplicationEvent> matchesAppInit =
+  doLocalization(spyService, dispatcher, exec, delService);
+
+} finally {
+  spyService.stop();
+  dispatcher.stop();
+  delService.stop();
+}
+  }
+
+  private DrainDispatcher getDispatcher(Configuration config) {
+DrainDispatcher dispatcher = new DrainDispatcher();
+dispatcher.init(config);
+dispatcher.start();
+return dispatcher;
+  }
+
+  @SuppressWarnings("unchecked")
+  private EventHandler<ApplicationEvent> getApplicationBus(
+  DrainDispatcher dispatcher) {
+EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
+dispatcher.register(ApplicationEventType.class, applicationBus);
+return applicationBus;
+  }
+
+  @SuppressWarnings("unchecked")
+  private EventHandler<ContainerEvent> getContainerBus(
+  DrainDispatcher dispatcher) {
+EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
+dispatcher.register(ContainerEventType.class, containerBus);
+return 
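
Editor's note on the shape of this refactor: the dispatcher construction and the two mocked event-handler registrations that previously opened the test inline are extracted into getDispatcher, getApplicationBus, and getContainerBus, and the body of the localization scenario moves into a doLocalization helper, leaving the test method itself as a short sequence of named setup calls plus its own assertions.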

[29/50] [abbrv] hadoop git commit: YARN-6420. RM startup failure due to wrong order in nodelabel editlog (Bibin A Chundatt via Varun Saxena)

2017-04-17 Thread inigoiri
YARN-6420. RM startup failure due to wrong order in nodelabel editlog (Bibin A 
Chundatt via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11406a44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11406a44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11406a44

Branch: refs/heads/HDFS-10467
Commit: 11406a44b3e40e4d20bd4628d392afafee126e3c
Parents: 8c69f5a
Author: Varun Saxena 
Authored: Wed Apr 5 09:16:35 2017 +0530
Committer: Inigo 
Committed: Thu Apr 6 18:58:22 2017 -0700

--
 .../resourcemanager/nodelabels/RMNodeLabelsManager.java | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11406a44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
index effe422..79b25ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
@@ -34,6 +34,7 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
@@ -134,6 +135,17 @@ public class RMNodeLabelsManager extends 
CommonNodeLabelsManager {
   }
 
   @Override
+  public void addToCluserNodeLabels(Collection<NodeLabel> labels)
+  throws IOException {
+try {
+  writeLock.lock();
+  super.addToCluserNodeLabels(labels);
+} finally {
+  writeLock.unlock();
+}
+  }
+
+  @Override
   public void
  removeLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode)
   throws IOException {
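
Editor's note: the override above serializes label additions, including those replayed from the edit log at RM startup, with the manager's other write-locked mutations by taking the write lock before delegating to the superclass. A hedged sketch of the lock-then-delegate idiom, shown with the lock() placed before the try in the more common form:

    import java.io.IOException;
    import java.util.Collection;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class Base {
      void addLabels(Collection<String> labels) throws IOException {
        // mutates shared label state
      }
    }

    class LockedSubclass extends Base {
      // In the real manager the paired read lock guards the query paths.
      private final ReentrantReadWriteLock.WriteLock writeLock =
          new ReentrantReadWriteLock().writeLock();

      @Override
      void addLabels(Collection<String> labels) throws IOException {
        writeLock.lock(); // outside try: if lock() throws, there is nothing to unlock
        try {
          super.addLabels(labels); // serialized with every other write-locked mutation
        } finally {
          writeLock.unlock();
        }
      }
    }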


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[40/50] [abbrv] hadoop git commit: HDFS-11608. HDFS write crashed with block size greater than 2 GB. Contributed by Xiaobing Zhou.

2017-04-17 Thread inigoiri
HDFS-11608. HDFS write crashed with block size greater than 2 GB. Contributed 
by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60c3edad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60c3edad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60c3edad

Branch: refs/heads/HDFS-10467
Commit: 60c3edad54703b8cfb4c03e297494750e226af3b
Parents: 7c477f2
Author: Xiaoyu Yao 
Authored: Thu Apr 6 16:11:55 2017 -0700
Committer: Inigo 
Committed: Thu Apr 6 18:58:23 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  39 +-
 .../protocol/datatransfer/PacketReceiver.java   |   2 +-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java | 126 +++
 3 files changed, 164 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60c3edad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index e4929e1..9a52fbe 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -50,6 +50,7 @@ import 
org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -122,6 +123,7 @@ public class DFSOutputStream extends FSOutputSummer
  private final EnumSet<AddBlockFlag> addBlockFlags;
  protected final AtomicReference<CachingStrategy> cachingStrategy;
   private FileEncryptionInfo fileEncryptionInfo;
+  private int writePacketSize;
 
   /** Use {@link ByteArrayManager} to create buffer for non-heartbeat 
packets.*/
   protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
@@ -202,6 +204,8 @@ public class DFSOutputStream extends FSOutputSummer
   +"{}", src);
 }
 
+initWritePacketSize();
+
 this.bytesPerChecksum = checksum.getBytesPerChecksum();
 if (bytesPerChecksum <= 0) {
   throw new HadoopIllegalArgumentException(
@@ -216,6 +220,21 @@ public class DFSOutputStream extends FSOutputSummer
 this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
   }
 
+  /**
+   * Ensures the configured writePacketSize never exceeds
+   * PacketReceiver.MAX_PACKET_SIZE.
+   */
+  private void initWritePacketSize() {
+writePacketSize = dfsClient.getConf().getWritePacketSize();
+if (writePacketSize > PacketReceiver.MAX_PACKET_SIZE) {
+  LOG.warn(
+  "Configured write packet exceeds {} bytes as max,"
+  + " using {} bytes.",
+  PacketReceiver.MAX_PACKET_SIZE, PacketReceiver.MAX_PACKET_SIZE);
+  writePacketSize = PacketReceiver.MAX_PACKET_SIZE;
+}
+  }
+
   /** Construct a new output stream for creating a file. */
   protected DFSOutputStream(DFSClient dfsClient, String src,
  HdfsFileStatus stat, EnumSet<CreateFlag> flag, Progressable progress,
@@ -489,13 +508,29 @@ public class DFSOutputStream extends FSOutputSummer
 }
 
 if (!getStreamer().getAppendChunk()) {
-  int psize = Math.min((int)(blockSize- getStreamer().getBytesCurBlock()),
-  dfsClient.getConf().getWritePacketSize());
+  final int psize = (int) Math
+  .min(blockSize - getStreamer().getBytesCurBlock(), writePacketSize);
   computePacketChunkSize(psize, bytesPerChecksum);
 }
   }
 
   /**
+   * Used in test only.
+   */
+  @VisibleForTesting
+  void setAppendChunk(final boolean appendChunk) {
+getStreamer().setAppendChunk(appendChunk);
+  }
+
+  /**
+   * Used in test only.
+   */
+  @VisibleForTesting
+  void setBytesCurBlock(final long bytesCurBlock) {
+getStreamer().setBytesCurBlock(bytesCurBlock);
+  }
+
+  /**
* if encountering a block boundary, send an empty packet to
* indicate the end of block and reset bytesCurBlock.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60c3edad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
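
Editor's note: initWritePacketSize clamps the configured packet size to the receiver's hard maximum and warns when it does, and the write path then takes the minimum of the remaining bytes in the block and the clamped size before the narrowing cast to int, so a block size past 2 GB can no longer overflow the packet size. A hedged sketch of the clamp; the 16 MB constant mirrors what PacketReceiver.MAX_PACKET_SIZE is believed to be and should be treated as an assumption:

    // Sketch of clamping a configured value to a protocol limit, with a warning.
    final class PacketSizing {
      static final int MAX_PACKET_SIZE = 16 * 1024 * 1024; // assumed receiver limit

      static int clampWritePacketSize(int configured) {
        if (configured > MAX_PACKET_SIZE) {
          System.err.printf("Configured write packet size %d exceeds max, using %d%n",
              configured, MAX_PACKET_SIZE);
          return MAX_PACKET_SIZE;
        }
        return configured;
      }

      // The subtraction is done in long arithmetic and clamped before the cast,
      // so blockSize > Integer.MAX_VALUE cannot wrap the result negative.
      static int nextPacketSize(long blockSize, long bytesCurBlock, int writePacketSize) {
        return (int) Math.min(blockSize - bytesCurBlock, writePacketSize);
      }
    }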

[34/50] [abbrv] hadoop git commit: YARN-6437. TestSignalContainer#testSignalRequestDeliveryToNM fails intermittently (Jason Lowe via Varun Saxena)

2017-04-17 Thread inigoiri
YARN-6437. TestSignalContainer#testSignalRequestDeliveryToNM fails 
intermittently (Jason Lowe via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04ef448c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04ef448c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04ef448c

Branch: refs/heads/HDFS-10467
Commit: 04ef448c270016920d71f589a0591628d267ab05
Parents: 5528527
Author: Varun Saxena 
Authored: Thu Apr 6 01:20:41 2017 +0530
Committer: Inigo 
Committed: Thu Apr 6 18:58:22 2017 -0700

--
 .../server/resourcemanager/TestSignalContainer.java| 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04ef448c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
index 692924c..2688987 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestSignalContainer.java
@@ -69,18 +69,17 @@ public class TestSignalContainer {
 
 //kick the scheduler
 nm1.nodeHeartbeat(true);
-List<Container> conts = null;
-int contReceived = 0;
+List<Container> conts = new ArrayList<>(request);
 int waitCount = 0;
-while (contReceived < request && waitCount++ < 200) {
-  LOG.info("Got " + contReceived + " containers. Waiting to get "
+while (conts.size() < request && waitCount++ < 200) {
+  LOG.info("Got " + conts.size() + " containers. Waiting to get "
+ request);
   Thread.sleep(100);
-  conts = am.allocate(new ArrayList<ResourceRequest>(),
+  List<Container> allocation = am.allocate(new
ArrayList<ResourceRequest>(),
   new ArrayList<ContainerId>()).getAllocatedContainers();
-  contReceived += conts.size();
+  conts.addAll(allocation);
 }
-Assert.assertEquals(request, contReceived);
+Assert.assertEquals(request, conts.size());
 
 for(Container container : conts) {
   rm.signalToContainer(container.getId(),
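
Editor's note: the intermittent failure came from overwriting conts with only the most recent allocate() batch while a separate counter tracked the running total, so the per-container loop afterwards could miss containers from earlier batches; the fix accumulates every batch into one list and asserts on that list's size. A hedged generic sketch of the accumulate-while-polling pattern:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Supplier;

    final class PollUntil {
      // Polls fetch until expected items have accumulated or attempts run out.
      static <T> List<T> collect(Supplier<List<T>> fetch, int expected, int maxTries)
          throws InterruptedException {
        List<T> all = new ArrayList<>(expected);
        int tries = 0;
        while (all.size() < expected && tries++ < maxTries) {
          Thread.sleep(100);
          all.addAll(fetch.get()); // accumulate each batch; never overwrite
        }
        return all;
      }
    }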


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org


