[hadoop] branch trunk updated: YARN-9524. Fixed TestAHSWebService and TestLogsCLI unit tests. Contributed by Prabhu Joseph

2019-05-06 Thread eyang
This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 49e1292  YARN-9524.  Fixed TestAHSWebService and TestLogsCLI unit 
tests. Contributed by Prabhu Joseph
49e1292 is described below

commit 49e1292ea3e4d00ab0b0191bd8c4ea4d2afed671
Author: Eric Yang 
AuthorDate: Mon May 6 19:48:45 2019 -0400

YARN-9524.  Fixed TestAHSWebService and TestLogsCLI unit tests.
Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/client/cli/TestLogsCLI.java | 27 +---
 .../yarn/logaggregation/LogAggregationUtils.java   | 48 --
 .../hadoop/yarn/logaggregation/LogCLIHelpers.java  | 34 ++-
 .../LogAggregationFileController.java  |  2 +-
 .../ifile/LogAggregationIndexedFileController.java |  2 +-
 5 files changed, 92 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index f9061eb..801cf40 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -407,7 +407,7 @@ public class TestLogsCLI {
 
 Path path =
 new Path(remoteLogRootDir + ugi.getShortUserName()
-+ "/logs/application_0_0001");
++ "/bucket_logs/0001/application_0_0001");
 if (fs.exists(path)) {
   fs.delete(path, true);
 }
@@ -925,7 +925,6 @@ public class TestLogsCLI {
   public void testFetchApplictionLogsAsAnotherUser() throws Exception {
 String remoteLogRootDir = "target/logs/";
 String rootLogDir = "target/LocalLogs";
-
 String testUser = "test";
 UserGroupInformation testUgi = UserGroupInformation
 .createRemoteUser(testUser);
@@ -966,9 +965,9 @@ public class TestLogsCLI {
   // create container logs in localLogDir for app
   createContainerLogInLocalDir(appLogsDir, containerId, fs, logTypes);
 
-  // create the remote app dir for app
-  // but for a different user testUser"
-  Path path = new Path(remoteLogRootDir + testUser + "/logs/" + appId);
+  // create the remote app dir for app but for a different user testUser
+  Path path = new Path(remoteLogRootDir + testUser + "/bucket_logs/0001/"
+  + appId);
   if (fs.exists(path)) {
 fs.delete(path, true);
   }
@@ -1016,6 +1015,22 @@ public class TestLogsCLI {
   logMessage(containerId, "syslog")));
   sysOutStream.reset();
 
+  // Verify appOwner guessed correctly with older log dir structure
+  path = new Path(remoteLogRootDir + testUser + "/logs/" + appId);
+  if (fs.exists(path)) {
+fs.delete(path, true);
+  }
+  assertTrue(fs.mkdirs(path));
+  uploadContainerLogIntoRemoteDir(testUgi, configuration, rootLogDirs,
+  nodeId, containerId, path, fs);
+
+  exitCode = cli.run(new String[] {
+  "-applicationId", appId.toString()});
+  assertTrue(exitCode == 0);
+  assertTrue(sysOutStream.toString().contains(
+  logMessage(containerId, "syslog")));
+  sysOutStream.reset();
+
   // Verify that we could get the err message "Can not find the appOwner"
   // if we do not specify the appOwner, can not get appReport, and
   // the app does not exist in remote dir.
@@ -1034,7 +1049,7 @@ public class TestLogsCLI {
   System.currentTimeMillis(), 1000);
   String priorityUser = "priority";
   Path pathWithoutPerm = new Path(remoteLogRootDir + priorityUser
-  + "/logs/" + appTest);
+  + "/bucket_logs/1000/" + appTest);
   if (fs.exists(pathWithoutPerm)) {
 fs.delete(pathWithoutPerm, true);
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
index 3f5151b..deff2aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java
@@ -83,6 +83,30 @@ public class LogAggregationUtils {
  suffix), appId.toString());
   }
 
+  public static Path getOlderRemoteAppLogDir(Configuration conf,
+  ApplicationId appId, String user, Path remoteRootLogDir, String suffix)
+  throws IOException {
+org.apache.hadoop.fs.Path remoteAppDir = null;
+  

[hadoop] branch trunk updated: HADOOP-16289. Allow extra jsvc startup option in hadoop_start_secure_daemon in hadoop-functions.sh. Contributed by Siyao Meng.

2019-05-06 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 93f2283  HADOOP-16289. Allow extra jsvc startup option in 
hadoop_start_secure_daemon in hadoop-functions.sh. Contributed by Siyao Meng.
93f2283 is described below

commit 93f2283a69ea4e07a998f2a4065f238f9574921b
Author: Siyao Meng 
AuthorDate: Mon May 6 15:47:00 2019 -0700

HADOOP-16289. Allow extra jsvc startup option in hadoop_start_secure_daemon 
in hadoop-functions.sh. Contributed by Siyao Meng.

Signed-off-by: Wei-Chiu Chuang 
---
 hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index c144c7f..484fe23 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1929,6 +1929,7 @@ function hadoop_start_secure_daemon
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
   hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}"
   hadoop_debug "jsvc: ${jsvc}"
+  hadoop_debug "Final HADOOP_DAEMON_JSVC_EXTRA_OPTS: 
${HADOOP_DAEMON_JSVC_EXTRA_OPTS}"
   hadoop_debug "Class name: ${class}"
   hadoop_debug "Command line options: $*"
 
@@ -1941,6 +1942,7 @@ function hadoop_start_secure_daemon
   # shellcheck disable=SC2086
   exec "${jsvc}" \
 "-Dproc_${daemonname}" \
+${HADOOP_DAEMON_JSVC_EXTRA_OPTS} \
 -outfile "${daemonoutfile}" \
 -errfile "${daemonerrfile}" \
 -pidfile "${daemonpidfile}" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-16289. Allow extra jsvc startup option in hadoop_start_secure_daemon in hadoop-functions.sh. Contributed by Siyao Meng.

2019-05-06 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 0958152  HADOOP-16289. Allow extra jsvc startup option in 
hadoop_start_secure_daemon in hadoop-functions.sh. Contributed by Siyao Meng.
0958152 is described below

commit 095815220df582a04f09f8b645f9f01128168bf9
Author: Siyao Meng 
AuthorDate: Mon May 6 15:47:00 2019 -0700

HADOOP-16289. Allow extra jsvc startup option in hadoop_start_secure_daemon 
in hadoop-functions.sh. Contributed by Siyao Meng.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 93f2283a69ea4e07a998f2a4065f238f9574921b)
---
 hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index f0daafd..b3b8afc 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1924,6 +1924,7 @@ function hadoop_start_secure_daemon
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
   hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}"
   hadoop_debug "jsvc: ${jsvc}"
+  hadoop_debug "Final HADOOP_DAEMON_JSVC_EXTRA_OPTS: 
${HADOOP_DAEMON_JSVC_EXTRA_OPTS}"
   hadoop_debug "Class name: ${class}"
   hadoop_debug "Command line options: $*"
 
@@ -1936,6 +1937,7 @@ function hadoop_start_secure_daemon
   # shellcheck disable=SC2086
   exec "${jsvc}" \
 "-Dproc_${daemonname}" \
+${HADOOP_DAEMON_JSVC_EXTRA_OPTS} \
 -outfile "${daemonoutfile}" \
 -errfile "${daemonerrfile}" \
 -pidfile "${daemonpidfile}" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HADOOP-16289. Allow extra jsvc startup option in hadoop_start_secure_daemon in hadoop-functions.sh. Contributed by Siyao Meng.

2019-05-06 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 8414353  HADOOP-16289. Allow extra jsvc startup option in 
hadoop_start_secure_daemon in hadoop-functions.sh. Contributed by Siyao Meng.
8414353 is described below

commit 8414353daa2a2e59ca94bef118f992e5ba55933b
Author: Siyao Meng 
AuthorDate: Mon May 6 15:47:00 2019 -0700

HADOOP-16289. Allow extra jsvc startup option in hadoop_start_secure_daemon 
in hadoop-functions.sh. Contributed by Siyao Meng.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 93f2283a69ea4e07a998f2a4065f238f9574921b)
(cherry picked from commit 095815220df582a04f09f8b645f9f01128168bf9)
---
 hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 847240d..bcb8158 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1918,6 +1918,7 @@ function hadoop_start_secure_daemon
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
   hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}"
   hadoop_debug "jsvc: ${jsvc}"
+  hadoop_debug "Final HADOOP_DAEMON_JSVC_EXTRA_OPTS: 
${HADOOP_DAEMON_JSVC_EXTRA_OPTS}"
   hadoop_debug "Class name: ${class}"
   hadoop_debug "Command line options: $*"
 
@@ -1930,6 +1931,7 @@ function hadoop_start_secure_daemon
   # shellcheck disable=SC2086
   exec "${jsvc}" \
 "-Dproc_${daemonname}" \
+${HADOOP_DAEMON_JSVC_EXTRA_OPTS} \
 -outfile "${daemonoutfile}" \
 -errfile "${daemonerrfile}" \
 -pidfile "${daemonpidfile}" \


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.8 updated: HDFS-13677. Dynamic refresh Disk configuration results in overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.

2019-05-06 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new d71eda9  HDFS-13677. Dynamic refresh Disk configuration results in 
overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.
d71eda9 is described below

commit d71eda92c6730f542be7ffd911cdc924d7e17b05
Author: Arpit Agarwal 
AuthorDate: Mon May 6 13:34:38 2019 -0700

HDFS-13677. Dynamic refresh Disk configuration results in overwriting 
VolumeMap. Contributed by xuzq and Stephen O'Donnell.

(cherry picked from commit 102c8fca10f3c626ab8bc47f818c8391a5c35289)
(cherry picked from commit 4a1d51dea2149e8f458467467798e81b126b7cc5)
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 -
 .../datanode/TestDataNodeHotSwapVolumes.java   | 71 ++
 .../datanode/fsdataset/impl/TestReplicaMap.java| 22 +++
 4 files changed, 107 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index b14f9e9..d14033d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -415,7 +415,7 @@ class FsDatasetImpl implements FsDatasetSpi {
 LOG.error(errorMsg);
 throw new IOException(errorMsg);
   }
-  volumeMap.addAll(replicaMap);
+  volumeMap.mergeAll(replicaMap);
   storageMap.put(sd.getStorageUuid(),
   new DatanodeStorage(sd.getStorageUuid(),
   DatanodeStorage.State.NORMAL,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 5705792..e94670a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -126,7 +126,19 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
 map.putAll(other.map);
   }
-  
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+for(String bp : other.getBlockPoolList()) {
+  for(ReplicaInfo r : other.map.get(bp)) {
+add(bp, r);
+  }
+}
+  }
+
   /**
* Remove the replica's meta information from the map that matches
* the input block's id and generation stamp
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 08df71f..a07ecc7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -47,6 +47,8 @@ import 
org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Test;
 
@@ -393,6 +395,75 @@ public class TestDataNodeHotSwapVolumes {
 verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+  throws IOException, ReconfigurationException,
+  InterruptedException, TimeoutException {
+startDFSCluster(1, 1);
+String bpid = cluster.getNamesystem().getBlockPoolId();
+final int numBlocks = 10;
+
+Path testFile = new Path("/test");
+createFile(testFile, numBlocks);
+
+List> blockReports =
+cluster.getAllBlockReports(bpid);
+assertEquals(1, blockReports.size());  // 1 DataNode
+assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+// Now remove the second volume
+DataNode dn = cluster.getDataNodes().get(0);
+Collection oldDirs = getDataDirs(dn);
+String 

[hadoop] branch branch-2.9 updated: HDFS-13677. Dynamic refresh Disk configuration results in overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.

2019-05-06 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new 4a1d51d  HDFS-13677. Dynamic refresh Disk configuration results in 
overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.
4a1d51d is described below

commit 4a1d51dea2149e8f458467467798e81b126b7cc5
Author: Arpit Agarwal 
AuthorDate: Mon May 6 13:34:38 2019 -0700

HDFS-13677. Dynamic refresh Disk configuration results in overwriting 
VolumeMap. Contributed by xuzq and Stephen O'Donnell.

(cherry picked from commit 102c8fca10f3c626ab8bc47f818c8391a5c35289)
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 -
 .../datanode/TestDataNodeHotSwapVolumes.java   | 70 ++
 .../datanode/fsdataset/impl/TestReplicaMap.java| 22 +++
 4 files changed, 106 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 4486b73..9946a3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -432,7 +432,7 @@ class FsDatasetImpl implements FsDatasetSpi {
 LOG.error(errorMsg);
 throw new IOException(errorMsg);
   }
-  volumeMap.addAll(replicaMap);
+  volumeMap.mergeAll(replicaMap);
   storageMap.put(sd.getStorageUuid(),
   new DatanodeStorage(sd.getStorageUuid(),
   DatanodeStorage.State.NORMAL,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 5705792..e94670a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -126,7 +126,19 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
 map.putAll(other.map);
   }
-  
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+for(String bp : other.getBlockPoolList()) {
+  for(ReplicaInfo r : other.map.get(bp)) {
+add(bp, r);
+  }
+}
+  }
+
   /**
* Remove the replica's meta information from the map that matches
* the input block's id and generation stamp
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index ea28ea4..125b431 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Assert;
@@ -416,6 +417,75 @@ public class TestDataNodeHotSwapVolumes {
 verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+  throws IOException, ReconfigurationException,
+  InterruptedException, TimeoutException {
+startDFSCluster(1, 1);
+String bpid = cluster.getNamesystem().getBlockPoolId();
+final int numBlocks = 10;
+
+Path testFile = new Path("/test");
+createFile(testFile, numBlocks);
+
+List> blockReports =
+cluster.getAllBlockReports(bpid);
+assertEquals(1, blockReports.size());  // 1 DataNode
+assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+// Now remove the second volume
+DataNode dn = cluster.getDataNodes().get(0);
+Collection oldDirs = getDataDirs(dn);
+String newDirs = oldDirs.iterator().next();  // Keep the first volume.
+

[hadoop] branch branch-2 updated: YARN-9529. Log correct cpu controller path on error while initializing CGroups. (Contributed by Jonathan Hung)

2019-05-06 Thread haibochen
This is an automated email from the ASF dual-hosted git repository.

haibochen pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new dc28c86  YARN-9529. Log correct cpu controller path on error while 
initializing CGroups. (Contributed by Jonathan Hung)
dc28c86 is described below

commit dc28c86270c010febd994ffe834dcd7f476c4369
Author: Haibo Chen 
AuthorDate: Mon May 6 11:55:07 2019 -0700

YARN-9529. Log correct cpu controller path on error while initializing 
CGroups. (Contributed by Jonathan Hung)

(cherry picked from commit 597fa47ad125c0871f5c4deb3a883e5b3341c67b)
(cherry picked from commit c6573562cbc43832e9332989996e5d07b8bce9b0)
(cherry picked from commit ea1f0f282bee7ca64ce9fbe1a59bb5e231420d29)
(cherry picked from commit aa7ff8552e0f1b9439926ae3476a3227dcf339f3)
---
 .../hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 54b6e1c..a73dcbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -482,7 +482,7 @@ public class CgroupsLCEResourcesHandler implements 
LCEResourcesHandler {
 controllerPaths.put(CONTROLLER_CPU, controllerPath);
   } else {
 throw new IOException("Not able to enforce cpu weights; cannot write "
-+ "to cgroup at: " + controllerPath);
++ "to cgroup at: " + f.getPath());
   }
 } else {
   throw new IOException("Not able to enforce cpu weights; cannot find "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9529. Log correct cpu controller path on error while initializing CGroups. (Contributed by Jonathan Hung)

2019-05-06 Thread haibochen
This is an automated email from the ASF dual-hosted git repository.

haibochen pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new ea1f0f2  YARN-9529. Log correct cpu controller path on error while 
initializing CGroups. (Contributed by Jonathan Hung)
ea1f0f2 is described below

commit ea1f0f282bee7ca64ce9fbe1a59bb5e231420d29
Author: Haibo Chen 
AuthorDate: Mon May 6 11:55:07 2019 -0700

YARN-9529. Log correct cpu controller path on error while initializing 
CGroups. (Contributed by Jonathan Hung)

(cherry picked from commit 597fa47ad125c0871f5c4deb3a883e5b3341c67b)
(cherry picked from commit c6573562cbc43832e9332989996e5d07b8bce9b0)
---
 .../hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 54b6e1c..a73dcbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -482,7 +482,7 @@ public class CgroupsLCEResourcesHandler implements 
LCEResourcesHandler {
 controllerPaths.put(CONTROLLER_CPU, controllerPath);
   } else {
 throw new IOException("Not able to enforce cpu weights; cannot write "
-+ "to cgroup at: " + controllerPath);
++ "to cgroup at: " + f.getPath());
   }
 } else {
   throw new IOException("Not able to enforce cpu weights; cannot find "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.0 updated: YARN-9529. Log correct cpu controller path on error while initializing CGroups. (Contributed by Jonathan Hung)

2019-05-06 Thread haibochen
This is an automated email from the ASF dual-hosted git repository.

haibochen pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
 new aa7ff85  YARN-9529. Log correct cpu controller path on error while 
initializing CGroups. (Contributed by Jonathan Hung)
aa7ff85 is described below

commit aa7ff8552e0f1b9439926ae3476a3227dcf339f3
Author: Haibo Chen 
AuthorDate: Mon May 6 11:55:07 2019 -0700

YARN-9529. Log correct cpu controller path on error while initializing 
CGroups. (Contributed by Jonathan Hung)

(cherry picked from commit 597fa47ad125c0871f5c4deb3a883e5b3341c67b)
(cherry picked from commit c6573562cbc43832e9332989996e5d07b8bce9b0)
(cherry picked from commit ea1f0f282bee7ca64ce9fbe1a59bb5e231420d29)
---
 .../hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 54b6e1c..a73dcbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -482,7 +482,7 @@ public class CgroupsLCEResourcesHandler implements 
LCEResourcesHandler {
 controllerPaths.put(CONTROLLER_CPU, controllerPath);
   } else {
 throw new IOException("Not able to enforce cpu weights; cannot write "
-+ "to cgroup at: " + controllerPath);
++ "to cgroup at: " + f.getPath());
   }
 } else {
   throw new IOException("Not able to enforce cpu weights; cannot find "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9529. Log correct cpu controller path on error while initializing CGroups. (Contributed by Jonathan Hung)

2019-05-06 Thread haibochen
This is an automated email from the ASF dual-hosted git repository.

haibochen pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new c657356  YARN-9529. Log correct cpu controller path on error while 
initializing CGroups. (Contributed by Jonathan Hung)
c657356 is described below

commit c6573562cbc43832e9332989996e5d07b8bce9b0
Author: Haibo Chen 
AuthorDate: Mon May 6 11:55:07 2019 -0700

YARN-9529. Log correct cpu controller path on error while initializing 
CGroups. (Contributed by Jonathan Hung)

(cherry picked from commit 597fa47ad125c0871f5c4deb3a883e5b3341c67b)
---
 .../hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 54b6e1c..a73dcbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -482,7 +482,7 @@ public class CgroupsLCEResourcesHandler implements 
LCEResourcesHandler {
 controllerPaths.put(CONTROLLER_CPU, controllerPath);
   } else {
 throw new IOException("Not able to enforce cpu weights; cannot write "
-+ "to cgroup at: " + controllerPath);
++ "to cgroup at: " + f.getPath());
   }
 } else {
   throw new IOException("Not able to enforce cpu weights; cannot find "


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9529. Log correct cpu controller path on error while initializing CGroups. (Contributed by Jonathan Hung)

2019-05-06 Thread haibochen
This is an automated email from the ASF dual-hosted git repository.

haibochen pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 597fa47  YARN-9529. Log correct cpu controller path on error while 
initializing CGroups. (Contributed by Jonathan Hung)
597fa47 is described below

commit 597fa47ad125c0871f5c4deb3a883e5b3341c67b
Author: Haibo Chen 
AuthorDate: Mon May 6 11:55:07 2019 -0700

YARN-9529. Log correct cpu controller path on error while initializing 
CGroups. (Contributed by Jonathan Hung)
---
 .../hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 6025260..8894767 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -476,7 +476,7 @@ public class CgroupsLCEResourcesHandler implements 
LCEResourcesHandler {
 controllerPaths.put(CONTROLLER_CPU, controllerPath);
   } else {
 throw new IOException("Not able to enforce cpu weights; cannot write "
-+ "to cgroup at: " + controllerPath);
++ "to cgroup at: " + f.getPath());
   }
 } else {
   throw new IOException("Not able to enforce cpu weights; cannot find "


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch trunk updated: Revert "HDDS-1384. TestBlockOutputStreamWithFailures is failing"

2019-05-06 Thread elek
This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fb7c1ca  Revert "HDDS-1384. TestBlockOutputStreamWithFailures is 
failing"
fb7c1ca is described below

commit fb7c1cad0ea93406a7272872c888d06e4e56620a
Author: Márton Elek 
AuthorDate: Mon May 6 20:17:00 2019 +0200

Revert "HDDS-1384. TestBlockOutputStreamWithFailures is failing"

This reverts commit dead9b4049484c31e0608956e53a9ef07a45819d.
---
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 62 +-
 1 file changed, 14 insertions(+), 48 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index c306f22..9fbdad7 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone;
 
 import java.io.File;
-import java.net.ServerSocket;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
@@ -64,7 +63,6 @@ import java.nio.file.Paths;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
@@ -390,9 +388,6 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
*/
   public static class Builder extends MiniOzoneCluster.Builder {
 
-private static AtomicInteger lastUsedPort =
-new AtomicInteger(1000);
-
 /**
  * Creates a new Builder.
  *
@@ -534,16 +529,14 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
  */
 List createHddsDatanodes(
 StorageContainerManager scm) throws IOException {
-
-  String scmAddress = scm.getDatanodeRpcAddress().getHostString() +
+  configureHddsDatanodes();
+  String scmAddress =  scm.getDatanodeRpcAddress().getHostString() +
   ":" + scm.getDatanodeRpcAddress().getPort();
   String[] args = new String[] {};
   conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress);
-
   List hddsDatanodes = new ArrayList<>();
   for (int i = 0; i < numOfDatanodes; i++) {
 OzoneConfiguration dnConf = new OzoneConfiguration(conf);
-configureHddsDatanodes(dnConf);
 String datanodeBaseDir = path + "/datanode-" + Integer.toString(i);
 Path metaDir = Paths.get(datanodeBaseDir, "meta");
 Path dataDir = Paths.get(datanodeBaseDir, "data", "containers");
@@ -570,14 +563,10 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
 }
 
 private void configureSCM() {
-  conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY,
-  "127.0.0.1:" + findPort());
-  conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
-  "127.0.0.1:" + findPort());
-  conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY,
-  "127.0.0.1:" + findPort());
-  conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY,
-  "127.0.0.1:" + findPort());
+  conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
+  conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, 
"127.0.0.1:0");
+  conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
+  conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
   conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
   configureSCMheartbeat();
 }
@@ -608,42 +597,19 @@ public class MiniOzoneClusterImpl implements 
MiniOzoneCluster {
 
 
 private void configureOM() {
-  conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:" + findPort());
-  conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY,
-  "127.0.0.1:" + findPort());
+  conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0");
+  conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
   conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers);
 }
 
-/**
- * Return an available TCP port if available.
- * 
- * As we have a static counter the port should be unique inside the JVM..
- */
-private int findPort() {
-  while (lastUsedPort.get() < 65536) {
-try {
-  int nextPort = lastUsedPort.incrementAndGet();
-  ServerSocket socket = new ServerSocket(nextPort);
-  socket.close();
-  return nextPort;
-} catch (IOException ex) {
-  //port is not available, let's try the next one.
-  continue;
- 

[hadoop] branch trunk updated: YARN-9440. Improve diagnostics for scheduler and app activities. Contributed by Tao Yang.

2019-05-06 Thread wwei
This is an automated email from the ASF dual-hosted git repository.

wwei pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 12b7059  YARN-9440. Improve diagnostics for scheduler and app 
activities. Contributed by Tao Yang.
12b7059 is described below

commit 12b7059ddc8d8f67dd7131565f03a0e09cb92ca7
Author: Weiwei Yang 
AuthorDate: Mon May 6 20:00:15 2019 +0800

YARN-9440. Improve diagnostics for scheduler and app activities. 
Contributed by Tao Yang.
---
 .../util/resource/DefaultResourceCalculator.java   |  16 +
 .../util/resource/DominantResourceCalculator.java  |  14 +
 .../yarn/util/resource/ResourceCalculator.java |  13 +
 .../yarn/util/resource/TestResourceCalculator.java | 119 ++
 .../scheduler/AppSchedulingInfo.java   |   8 +-
 .../scheduler/activities/ActivitiesLogger.java |  87 -
 .../scheduler/activities/ActivitiesManager.java|  89 -
 .../activities/ActivityDiagnosticConstant.java |   9 +-
 .../scheduler/activities/ActivityNode.java |  27 ++
 .../scheduler/activities/AllocationActivity.java   |  18 +-
 .../scheduler/activities/AppAllocation.java|   5 +-
 .../scheduler/activities/DiagnosticsCollector.java |  44 +++
 .../activities/GenericDiagnosticsCollector.java|  85 +
 .../scheduler/activities/NodeAllocation.java   |   9 +-
 .../scheduler/capacity/LeafQueue.java  |   4 +-
 .../allocator/RegularContainerAllocator.java   |  96 ++---
 .../constraint/PlacementConstraintsUtil.java   |  46 ++-
 .../scheduler/placement/AppPlacementAllocator.java |   7 +
 .../placement/LocalityAppPlacementAllocator.java   |  19 +-
 .../SingleConstraintAppPlacementAllocator.java |  27 +-
 .../resourcemanager/webapp/dao/ActivitiesInfo.java |   8 +-
 .../webapp/dao/ActivityNodeInfo.java   |  34 +-
 .../webapp/dao/AppAllocationInfo.java  |  72 ++--
 ...tionInfo.java => AppRequestAllocationInfo.java} |  66 ++--
 .../activities/TestActivitiesManager.java  |  10 +-
 .../constraint/TestPlacementConstraintsUtil.java   |  12 +
 .../webapp/ActivitiesTestUtils.java| 195 ++
 .../webapp/TestRMWebServicesCapacitySched.java |   2 +
 .../TestRMWebServicesSchedulerActivities.java  | 406 ++---
 ...esSchedulerActivitiesWithMultiNodesEnabled.java | 198 --
 30 files changed, 1437 insertions(+), 308 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 61b1a87..1b21d2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -17,18 +17,25 @@
 */
 package org.apache.hadoop.yarn.util.resource;
 
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
 
+import java.util.Set;
+
 @Private
 @Unstable
 public class DefaultResourceCalculator extends ResourceCalculator {
   private static final Logger LOG =
   LoggerFactory.getLogger(DefaultResourceCalculator.class);
 
+  private static final Set INSUFFICIENT_RESOURCE_NAME =
+  ImmutableSet.of(ResourceInformation.MEMORY_URI);
+
   @Override
   public int compare(Resource unused, Resource lhs, Resource rhs,
   boolean singleType) {
@@ -150,4 +157,13 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   public boolean isAnyMajorResourceAboveZero(Resource resource) {
 return resource.getMemorySize() > 0;
   }
+
+  public Set getInsufficientResourceNames(Resource required,
+  Resource available) {
+if (required.getMemorySize() > available.getMemorySize()) {
+  return INSUFFICIENT_RESOURCE_NAME;
+} else {
+  return ImmutableSet.of();
+}
+  }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 16176ef..ca7360a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++