hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 226bedc02 -> 1a407bc99


HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a407bc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a407bc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a407bc9

Branch: refs/heads/trunk
Commit: 1a407bc9906306801690bc75ff0f0456f8f265fd
Parents: 226bedc
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:27:51 2018 -0700

--
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a407bc9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index b89cdc0..d29dd34 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -156,6 +156,7 @@ public class MiniHadoopClusterManager {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
   .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
   .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());
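
For context, a minimal sketch (illustrative, not part of the commit) of what
the one-line fix enables. MiniDFSCluster.Builder formats storage by default,
so before this change the cluster was re-formatted even when the startup
option was REGULAR; wiring format to (dfsOpts == StartupOption.FORMAT) lets a
run without the -format flag reuse existing name/data directories. The class
name below is hypothetical; the Builder calls are the existing test API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

    public class ReuseMiniClusterState {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // A second run keeps its on-disk metadata only if format is
        // explicitly disabled; the Builder formats by default.
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)
            .format(false)                         // preserve existing dirs
            .startupOption(StartupOption.REGULAR)  // non-format startup
            .build();
        dfs.waitActive();
        try {
          // ... exercise the cluster ...
        } finally {
          dfs.shutdown();
        }
      }
    }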





hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 64105868e -> bce57a139


HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

(cherry picked from commit 1a407bc9906306801690bc75ff0f0456f8f265fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bce57a13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bce57a13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bce57a13

Branch: refs/heads/branch-3.0
Commit: bce57a139b4423ad2e85563212bb3106fc084f35
Parents: 6410586
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:35:58 2018 -0700

--
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bce57a13/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index 324f0ca..4d3ca48 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -156,6 +156,7 @@ public class MiniHadoopClusterManager {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
   .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
   .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());





hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 84531ad9b -> e343be46f


HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

(cherry picked from commit 1a407bc9906306801690bc75ff0f0456f8f265fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e343be46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e343be46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e343be46

Branch: refs/heads/branch-3.1
Commit: e343be46f8aa0cabe25d85e24c1e1ec8e6ebb061
Parents: 84531ad
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:31:18 2018 -0700

--
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e343be46/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index b89cdc0..d29dd34 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -156,6 +156,7 @@ public class MiniHadoopClusterManager {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
   .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
   .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());





hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7393020cb -> 31d061e4e


HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

(cherry picked from commit 1a407bc9906306801690bc75ff0f0456f8f265fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31d061e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31d061e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31d061e4

Branch: refs/heads/branch-2
Commit: 31d061e4e844147501518f64ec41cf8867cc70bd
Parents: 7393020
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:39:47 2018 -0700

--
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d061e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index 96d0027..becc768 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -156,6 +156,7 @@ public class MiniHadoopClusterManager {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
   .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
   .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());





hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1548205a8 -> c4ea1c8bb


HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

(cherry picked from commit 1a407bc9906306801690bc75ff0f0456f8f265fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4ea1c8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4ea1c8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4ea1c8b

Branch: refs/heads/branch-2.8
Commit: c4ea1c8bb73a91be6233a855c62ea1a8119dbecb
Parents: 1548205
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:52:25 2018 -0700

--
 .../org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ea1c8b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index 2e8ba5e..4055b06 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -152,7 +152,9 @@ public class MiniHadoopClusterManager {
   URISyntaxException {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
-  .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
+  .numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
+  .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());
 }





hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 48023bda1 -> 7b0573108


HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

(cherry picked from commit 1a407bc9906306801690bc75ff0f0456f8f265fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b057310
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b057310
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b057310

Branch: refs/heads/branch-2.9
Commit: 7b05731080b73c820c5ecdb3f85eaa0233377ffd
Parents: 48023bd
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:43:16 2018 -0700

--
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b057310/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index 96d0027..becc768 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -156,6 +156,7 @@ public class MiniHadoopClusterManager {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
   .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
   .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());





hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 b4d24d822 -> f7830f399


HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

(cherry picked from commit 1a407bc9906306801690bc75ff0f0456f8f265fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7830f39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7830f39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7830f39

Branch: refs/heads/branch-2.7
Commit: f7830f399a73a88b20deeb881ef7b55af5e5adad
Parents: b4d24d8
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:54:03 2018 -0700

--
 .../org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7830f39/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index 2e8ba5e..4055b06 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -152,7 +152,9 @@ public class MiniHadoopClusterManager {
   URISyntaxException {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
-  .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
+  .numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
+  .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());
 }





hadoop git commit: HDFS-13423. Ozone: Clean-up of ozone related change from hadoop-hdfs-project. Contributed by Nanda Kumar.

2018-04-13 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 c36a850af -> 584c573a5


HDFS-13423. Ozone: Clean-up of ozone related change from hadoop-hdfs-project. 
Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/584c573a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/584c573a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/584c573a

Branch: refs/heads/HDFS-7240
Commit: 584c573a5604d49522c4b7766fc52f4d3eb92496
Parents: c36a850
Author: Mukul Kumar Singh 
Authored: Fri Apr 13 14:13:06 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Fri Apr 13 14:13:06 2018 +0530

--
 .../java/org/apache/hadoop/hdds/HddsUtils.java  | 46 
 .../hadoop/ozone/HddsDatanodeService.java   |  3 +-
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/common/StorageInfo.java  |  4 --
 .../hadoop/hdfs/server/datanode/DataNode.java   | 14 +-
 .../web/RestCsrfPreventionFilterHandler.java|  2 +-
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto |  8 
 .../namenode/TestFavoredNodesEndToEnd.java  |  6 +--
 .../hadoop/ozone/MiniOzoneClassicCluster.java   | 19 +---
 .../hadoop/ozone/MiniOzoneTestHelper.java   | 22 --
 .../apache/hadoop/ozone/RatisTestHelper.java|  2 +-
 .../hadoop/ozone/TestMiniOzoneCluster.java  |  8 +---
 .../apache/hadoop/ozone/TestOzoneHelper.java|  3 +-
 .../TestStorageContainerManagerHelper.java  |  2 +-
 .../TestCloseContainerHandler.java  |  2 +-
 .../ozoneimpl/TestOzoneContainerRatis.java  |  3 +-
 .../container/ozoneimpl/TestRatisManager.java   |  3 +-
 .../ksm/TestKeySpaceManagerRestInterface.java   |  3 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java|  3 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java |  3 +-
 .../apache/hadoop/ozone/scm/TestSCMMetrics.java |  3 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |  4 +-
 .../hadoop/ozone/web/TestLocalOzoneVolumes.java |  4 +-
 .../hadoop/ozone/web/TestOzoneWebAccess.java|  3 +-
 .../hadoop/ozone/web/client/TestBuckets.java|  3 +-
 .../hadoop/ozone/web/client/TestKeys.java   |  4 +-
 .../ozone/web/client/TestOzoneClient.java   |  3 +-
 .../hadoop/ozone/web/client/TestVolume.java |  3 +-
 .../web/netty/ObjectStoreRestHttpServer.java| 45 +--
 29 files changed, 109 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/584c573a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index f00f503..a0b5c47 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -22,18 +22,26 @@ import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
 import java.nio.file.Paths;
 import java.util.Collection;
 import java.util.HashSet;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+.DFS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+.DFS_DATANODE_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
 
@@ -269,4 +277,42 @@ public class HddsUtils {
 }
 return dataNodeIDPath;
   }
+
+  /**
+   * Returns the hostname for this datanode. If the hostname is not
+   * explicitly configured in the given config, then it is determined
+   * via the DNS class.
+   *
+   * @param conf Configuration
+   *
+   * @return the hostname (NB: may not be a FQDN)
+   * @throws UnknownHostException if the dfs.datanode.dns.interface
+   *option is used and the hostname can not be determined
+   */
+  public static String getHostName(Configuration conf)
+  throws UnknownHostException {
+String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
+if (name == null) {
+  String dnsInterface = conf.get(
+  CommonConfigurationK
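
A hedged usage sketch of the new HddsUtils#getHostName helper, based only on
the javadoc visible in this hunk; the class name and hostname value below are
made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.HddsUtils;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class HostNameLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // An explicitly configured name wins over DNS resolution.
        conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "dn1.example.com");
        System.out.println(HddsUtils.getHostName(conf)); // dn1.example.com

        // With the key unset, the name is resolved through the DNS class
        // using dfs.datanode.dns.interface / dfs.datanode.dns.nameserver,
        // and UnknownHostException is thrown if resolution fails.
        conf.unset(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
        System.out.println(HddsUtils.getHostName(conf));
      }
    }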

hadoop git commit: HDFS-13418. NetworkTopology should be configurable when enable DFSNetworkTopology. Contributed by Tao Jie.

2018-04-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1a407bc99 -> 0725953ef


HDFS-13418. NetworkTopology should be configurable when enable 
DFSNetworkTopology. Contributed by Tao Jie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0725953e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0725953e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0725953e

Branch: refs/heads/trunk
Commit: 0725953efec89b35b7586b846abb01f7c5963b37
Parents: 1a407bc
Author: Yiqun Lin 
Authored: Fri Apr 13 17:55:45 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Apr 13 17:55:45 2018 +0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  7 +++
 .../hadoop/hdfs/net/DFSNetworkTopology.java | 10 +++-
 .../src/main/resources/hdfs-default.xml | 14 ++
 .../blockmanagement/TestDatanodeManager.java| 52 
 4 files changed, 81 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0725953e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b4b9d97..b4dab4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
@@ -1177,6 +1178,12 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   "dfs.use.dfs.network.topology";
   public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
 
+  public static final String DFS_NET_TOPOLOGY_IMPL_KEY =
+  "dfs.net.topology.impl";
+
+  public static final Class DFS_NET_TOPOLOGY_IMPL_DEFAULT =
+  DFSNetworkTopology.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0725953e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index e74cdec..7889ef4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -22,11 +22,13 @@ import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -44,8 +46,12 @@ public class DFSNetworkTopology extends NetworkTopology {
   private static final Random RANDOM = new Random();
 
   public static DFSNetworkTopology getInstance(Configuration conf) {
-DFSNetworkTopology nt = new DFSNetworkTopology();
-return (DFSNetworkTopology)nt.init(DFSTopologyNodeImpl.FACTORY);
+
+DFSNetworkTopology nt = ReflectionUtils.newInstance(conf.getClass(
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_DEFAULT,
+DFSNetworkTopology.class), conf);
+return (DFSNetworkTopology) nt.init(DFSTopologyNodeImpl.FACTORY);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0725953e/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src
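
A sketch of how a deployment could plug in a custom topology under the new
dfs.net.topology.impl key; the subclass here is hypothetical, and any class
extending DFSNetworkTopology with an accessible no-arg constructor should
satisfy ReflectionUtils.newInstance:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.net.DFSNetworkTopology;

    public class CustomTopologyExample {
      // Hypothetical stand-in for a site-specific topology implementation.
      public static class MyTopology extends DFSNetworkTopology {
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Programmatic equivalent of setting dfs.net.topology.impl
        // in hdfs-site.xml.
        conf.setClass(DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
            MyTopology.class, DFSNetworkTopology.class);
        // getInstance now reflects the configured class instead of
        // hard-wiring new DFSNetworkTopology().
        DFSNetworkTopology nt = DFSNetworkTopology.getInstance(conf);
        System.out.println(nt.getClass().getName());
      }
    }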

hadoop git commit: HDFS-13418. NetworkTopology should be configurable when enable DFSNetworkTopology. Contributed by Tao Jie.

2018-04-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e343be46f -> 16763d822


HDFS-13418. NetworkTopology should be configurable when enable 
DFSNetworkTopology. Contributed by Tao Jie.

(cherry picked from commit 0725953efec89b35b7586b846abb01f7c5963b37)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16763d82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16763d82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16763d82

Branch: refs/heads/branch-3.1
Commit: 16763d82297efd422ff7d62e94347c4f0097e9a8
Parents: e343be4
Author: Yiqun Lin 
Authored: Fri Apr 13 17:55:45 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Apr 13 17:57:15 2018 +0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  7 +++
 .../hadoop/hdfs/net/DFSNetworkTopology.java | 10 +++-
 .../src/main/resources/hdfs-default.xml | 14 ++
 .../blockmanagement/TestDatanodeManager.java| 52 
 4 files changed, 81 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16763d82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b4b9d97..b4dab4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
@@ -1177,6 +1178,12 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   "dfs.use.dfs.network.topology";
   public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
 
+  public static final String DFS_NET_TOPOLOGY_IMPL_KEY =
+  "dfs.net.topology.impl";
+
+  public static final Class DFS_NET_TOPOLOGY_IMPL_DEFAULT =
+  DFSNetworkTopology.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16763d82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index e74cdec..7889ef4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -22,11 +22,13 @@ import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -44,8 +46,12 @@ public class DFSNetworkTopology extends NetworkTopology {
   private static final Random RANDOM = new Random();
 
   public static DFSNetworkTopology getInstance(Configuration conf) {
-DFSNetworkTopology nt = new DFSNetworkTopology();
-return (DFSNetworkTopology)nt.init(DFSTopologyNodeImpl.FACTORY);
+
+DFSNetworkTopology nt = ReflectionUtils.newInstance(conf.getClass(
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_DEFAULT,
+DFSNetworkTopology.class), conf);
+return (DFSNetworkTopology) nt.init(DFSTopologyNodeImpl.FACTORY);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16763d82/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop

hadoop git commit: HDFS-13418. NetworkTopology should be configurable when enable DFSNetworkTopology. Contributed by Tao Jie.

2018-04-13 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 31d061e4e -> a7721082d


HDFS-13418. NetworkTopology should be configurable when enable 
DFSNetworkTopology. Contributed by Tao Jie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7721082
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7721082
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7721082

Branch: refs/heads/branch-2
Commit: a7721082dc3d608dc8a3ac7e1ffa0de21781f00b
Parents: 31d061e
Author: Yiqun Lin 
Authored: Fri Apr 13 17:59:35 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Apr 13 17:59:35 2018 +0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  7 +++
 .../hadoop/hdfs/net/DFSNetworkTopology.java |  9 +++-
 .../src/main/resources/hdfs-default.xml | 14 ++
 .../blockmanagement/TestDatanodeManager.java| 52 
 4 files changed, 80 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7721082/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 42ce05b..c9fef06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
@@ -1003,6 +1004,12 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   "dfs.use.dfs.network.topology";
   public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
 
+  public static final String DFS_NET_TOPOLOGY_IMPL_KEY =
+  "dfs.net.topology.impl";
+
+  public static final Class DFS_NET_TOPOLOGY_IMPL_DEFAULT =
+  DFSNetworkTopology.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7721082/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index e74cdec..f3074d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -22,11 +22,13 @@ import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -44,8 +46,11 @@ public class DFSNetworkTopology extends NetworkTopology {
   private static final Random RANDOM = new Random();
 
   public static DFSNetworkTopology getInstance(Configuration conf) {
-DFSNetworkTopology nt = new DFSNetworkTopology();
-return (DFSNetworkTopology)nt.init(DFSTopologyNodeImpl.FACTORY);
+DFSNetworkTopology nt = ReflectionUtils.newInstance(conf.getClass(
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_DEFAULT,
+DFSNetworkTopology.class), conf);
+return (DFSNetworkTopology) nt.init(DFSTopologyNodeImpl.FACTORY);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7721082/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/

hadoop git commit: HDFS-13438. Fix javadoc in FsVolumeList#removeVolume. Contributed by Shashikant Banerjee.

2018-04-13 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0725953ef -> fa8b88ab2


HDFS-13438. Fix javadoc in FsVolumeList#removeVolume. Contributed by Shashikant 
Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa8b88ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa8b88ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa8b88ab

Branch: refs/heads/trunk
Commit: fa8b88ab2b272b29cf116a5de038d78fc4357b9d
Parents: 0725953
Author: Bharat Viswanadham 
Authored: Fri Apr 13 08:56:02 2018 -0700
Committer: Bharat Viswanadham 
Committed: Fri Apr 13 08:56:02 2018 -0700

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa8b88ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 75baf84..8f52ea7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -335,7 +335,7 @@ class FsVolumeList {
 
   /**
* Dynamically remove volume in the list.
-   * @param volume the volume to be removed.
+   * @param storageLocation {@link StorageLocation} of the volume to be 
removed.
* @param clearFailure set true to remove failure info for this volume.
*/
   void removeVolume(StorageLocation storageLocation, boolean clearFailure) {





hadoop git commit: HDFS-13430. Fix TestEncryptionZonesWithKMS failure due to HADOOP-14445.

2018-04-13 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk fa8b88ab2 -> 650359371


HDFS-13430. Fix TestEncryptionZonesWithKMS failure due to HADOOP-14445.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65035937
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65035937
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65035937

Branch: refs/heads/trunk
Commit: 650359371175fba416331e73aa03d2a96ccb90e5
Parents: fa8b88a
Author: Xiao Chen 
Authored: Fri Apr 13 09:04:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Apr 13 09:05:17 2018 -0700

--
 .../src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65035937/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 6f9ef29..51c6c4e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -110,6 +110,7 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
 import static org.junit.Assert.assertNotNull;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
@@ -192,6 +193,8 @@ public class TestEncryptionZones {
 // Lower the batch size for testing
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
 2);
+// disable kms client copy legacy token logic because it's irrelevant.
+conf.setBoolean(KMS_CLIENT_COPY_LEGACY_TOKEN_KEY, false);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
 cluster.waitActive();
 Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);





hadoop git commit: HDFS-13438. Fix javadoc in FsVolumeList#removeVolume. Contributed by Shashikant Banerjee.

2018-04-13 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 16763d822 -> 8df8cad01


HDFS-13438. Fix javadoc in FsVolumeList#removeVolume. Contributed by Shashikant 
Banerjee.

(cherry picked from commit fa8b88ab2b272b29cf116a5de038d78fc4357b9d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8df8cad0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8df8cad0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8df8cad0

Branch: refs/heads/branch-3.1
Commit: 8df8cad019836bd81005392667558a3fc88459be
Parents: 16763d8
Author: Bharat Viswanadham 
Authored: Fri Apr 13 08:56:02 2018 -0700
Committer: Bharat Viswanadham 
Committed: Fri Apr 13 09:05:24 2018 -0700

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8df8cad0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 75baf84..8f52ea7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -335,7 +335,7 @@ class FsVolumeList {
 
   /**
* Dynamically remove volume in the list.
-   * @param volume the volume to be removed.
+   * @param storageLocation {@link StorageLocation} of the volume to be 
removed.
* @param clearFailure set true to remove failure info for this volume.
*/
   void removeVolume(StorageLocation storageLocation, boolean clearFailure) {





hadoop git commit: HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor Bota.

2018-04-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 650359371 -> e66e287ef


HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor 
Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e66e287e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e66e287e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e66e287e

Branch: refs/heads/trunk
Commit: e66e287efe2b43e710137a628f03c7df3ebdf498
Parents: 6503593
Author: Wei-Chiu Chuang 
Authored: Fri Apr 13 09:17:34 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Apr 13 09:17:34 2018 -0700

--
 .../hdfs/shortcircuit/ShortCircuitCache.java| 11 ++---
 .../shortcircuit/TestShortCircuitCache.java | 26 
 2 files changed, 33 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e66e287e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index b26652b..c2f0350 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -664,6 +664,7 @@ public class ShortCircuitCache implements Closeable {
 unref(replica);
   }
 
+  static final int FETCH_OR_CREATE_RETRY_TIMES = 3;
   /**
* Fetch or create a replica.
*
@@ -678,11 +679,11 @@ public class ShortCircuitCache implements Closeable {
*/
   public ShortCircuitReplicaInfo fetchOrCreate(ExtendedBlockId key,
   ShortCircuitReplicaCreator creator) {
-Waitable<ShortCircuitReplicaInfo> newWaitable = null;
+Waitable<ShortCircuitReplicaInfo> newWaitable;
 lock.lock();
 try {
   ShortCircuitReplicaInfo info = null;
-  do {
+  for (int i = 0; i < FETCH_OR_CREATE_RETRY_TIMES; i++){
 if (closed) {
   LOG.trace("{}: can't fethchOrCreate {} because the cache is closed.",
   this, key);
@@ -692,11 +693,12 @@ public class ShortCircuitCache implements Closeable {
 if (waitable != null) {
   try {
 info = fetch(key, waitable);
+break;
   } catch (RetriableException e) {
 LOG.debug("{}: retrying {}", this, e.getMessage());
   }
 }
-  } while (false);
+  }
   if (info != null) return info;
   // We need to load the replica ourselves.
   newWaitable = new Waitable<>(lock.newCondition());
@@ -717,7 +719,8 @@ public class ShortCircuitCache implements Closeable {
*
* @throws RetriableException   If the caller needs to retry.
*/
-  private ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
+  @VisibleForTesting // ONLY for testing
+  protected ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
  Waitable<ShortCircuitReplicaInfo> waitable) throws RetriableException {
 // Another thread is already in the process of loading this
 // ShortCircuitReplica.  So we simply wait for it to complete.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e66e287e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7ba0edc..5da6a25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.ShortCircuitReplica
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -793,4 +794,29 @@ public class TestShortCircuitCache {
 cluster.shutdown();
 sockDir.close();
   }
+
+  @Test
+  public void testFetchOrCreateRetries() throws Exception {
+try(ShortCircuitCache cache = Mockito
+.spy(new ShortCircuitCache(10, 1000

hadoop git commit: Revert "YARN-7810. Update TestDockerContainerRuntime to test with current user credential."

2018-04-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a7721082d -> cc2a2a8e0


Revert "YARN-7810.  Update TestDockerContainerRuntime to test with current user 
credential."

This reverts commit 724bffdb89cd62cc90a1f49c7c5e40998dc1cc0f.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc2a2a8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc2a2a8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc2a2a8e

Branch: refs/heads/branch-2
Commit: cc2a2a8e063ecdb23216830be8418736af79fe7b
Parents: a772108
Author: Wei-Chiu Chuang 
Authored: Fri Apr 13 10:13:59 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Apr 13 10:13:59 2018 -0700

--
 .../runtime/TestDockerContainerRuntime.java | 178 +--
 1 file changed, 82 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc2a2a8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index ab38ea2..aef94a7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -81,8 +81,7 @@ public class TestDockerContainerRuntime {
  private HashMap<String, String> env;
   private String image;
   private String uidGidPair;
-  private String runAsUser = System.getProperty("user.name");
-  private String[] groups = {};
+  private String runAsUser;
   private String user;
   private String appId;
   private String containerIdStr = containerId;
@@ -131,37 +130,8 @@ public class TestDockerContainerRuntime {
 when(context.getEnvironment()).thenReturn(env);
 when(container.getUser()).thenReturn(submittingUser);
 
-// Get the running user's uid and gid for remap
-String uid = "";
-String gid = "";
-Shell.ShellCommandExecutor shexec1 = new Shell.ShellCommandExecutor(
-new String[]{"id", "-u", runAsUser});
-Shell.ShellCommandExecutor shexec2 = new Shell.ShellCommandExecutor(
-new String[]{"id", "-g", runAsUser});
-Shell.ShellCommandExecutor shexec3 = new Shell.ShellCommandExecutor(
-new String[]{"id", "-G", runAsUser});
-try {
-  shexec1.execute();
-  // get rid of newline at the end
-  uid = shexec1.getOutput().replaceAll("\n$", "");
-} catch (Exception e) {
-  LOG.info("Could not run id -u command: " + e);
-}
-try {
-  shexec2.execute();
-  // get rid of newline at the end
-  gid = shexec2.getOutput().replaceAll("\n$", "");
-} catch (Exception e) {
-  LOG.info("Could not run id -g command: " + e);
-}
-try {
-  shexec3.execute();
-  groups = shexec3.getOutput().replace("\n", " ").split(" ");
-} catch (Exception e) {
-  LOG.info("Could not run id -G command: " + e);
-}
-uidGidPair = uid + ":" + gid;
-
+uidGidPair = "";
+runAsUser = "run_as_user";
 user = "user";
 appId = "app_id";
 containerIdStr = containerId;
@@ -331,7 +301,7 @@ public class TestDockerContainerRuntime {
 List<String> dockerCommands = Files.readAllLines(Paths.get
 (dockerCommandFile), Charset.forName("UTF-8"));
 
-int expected = 14;
+int expected = 13;
 int counter = 0;
 Assert.assertEquals(expected, dockerCommands.size());
 Assert.assertEquals("[docker-command-execution]",
@@ -341,8 +311,6 @@ public class TestDockerContainerRuntime {
 Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
 Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
 Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
-Assert.assertEquals("  group-add=" + String.join(",", groups),
-dockerCommands.get(counter++));
 Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
 Assert
 .assertEquals("  image=busybox:latest", dockerCommands.get(counter++));
@@ -358,7 +326,7 @@ public class TestDockerContain

[02/14] hadoop git commit: HADOOP-15062. TestCryptoStreamsWithOpensslAesCtrCryptoCodec fails on Debian 9. Contributed by Miklos Szegedi.

2018-04-13 Thread arp
HADOOP-15062. TestCryptoStreamsWithOpensslAesCtrCryptoCodec fails on Debian 9. 
Contributed by Miklos Szegedi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9014f98b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9014f98b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9014f98b

Branch: refs/heads/branch-3.1
Commit: 9014f98b603bbb680d92a54b414e4c38e329e892
Parents: ffc39ec
Author: Yufei Gu 
Authored: Tue Mar 20 15:19:18 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:12:01 2018 -0700

--
 .../org/apache/hadoop/crypto/OpensslCipher.c| 33 
 1 file changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9014f98b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
index c7984a3..abff7ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
@@ -27,8 +27,12 @@
 #ifdef UNIX
 static EVP_CIPHER_CTX * (*dlsym_EVP_CIPHER_CTX_new)(void);
 static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
+#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
+static int (*dlsym_EVP_CIPHER_CTX_reset)(EVP_CIPHER_CTX *);
+#else
 static int (*dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
 static void (*dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
+#endif
 static int (*dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
 static int (*dlsym_EVP_CIPHER_CTX_test_flags)(const EVP_CIPHER_CTX *, int);
 static int (*dlsym_EVP_CIPHER_CTX_block_size)(const EVP_CIPHER_CTX *);
@@ -123,10 +127,16 @@ JNIEXPORT void JNICALL 
Java_org_apache_hadoop_crypto_OpensslCipher_initIDs
   "EVP_CIPHER_CTX_new");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_free, env, openssl,  \
   "EVP_CIPHER_CTX_free");
+#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_reset, env, openssl,  \
+  "EVP_CIPHER_CTX_reset");
+#else
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_cleanup, env, openssl,  \
   "EVP_CIPHER_CTX_cleanup");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_init, env, openssl,  \
   "EVP_CIPHER_CTX_init");
+#endif
+
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_set_padding, env, openssl,  \
   "EVP_CIPHER_CTX_set_padding");
   LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_test_flags, env, openssl,  \
@@ -271,7 +281,11 @@ JNIEXPORT jlong JNICALL 
Java_org_apache_hadoop_crypto_OpensslCipher_init
   (*env)->ReleaseByteArrayElements(env, key, jKey, 0);
   (*env)->ReleaseByteArrayElements(env, iv, jIv, 0);
   if (rc == 0) {
+#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
+dlsym_EVP_CIPHER_CTX_reset(context);
+#else
 dlsym_EVP_CIPHER_CTX_cleanup(context);
+#endif
 THROW(env, "java/lang/InternalError", "Error in EVP_CipherInit_ex.");
 return (jlong)0;
   }
@@ -334,7 +348,11 @@ JNIEXPORT jint JNICALL 
Java_org_apache_hadoop_crypto_OpensslCipher_update
   int output_len = 0;
   if (!dlsym_EVP_CipherUpdate(context, output_bytes, &output_len,  \
   input_bytes, input_len)) {
+#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
+dlsym_EVP_CIPHER_CTX_reset(context);
+#else
 dlsym_EVP_CIPHER_CTX_cleanup(context);
+#endif
 THROW(env, "java/lang/InternalError", "Error in EVP_CipherUpdate.");
 return 0;
   }
@@ -376,7 +394,11 @@ JNIEXPORT jint JNICALL 
Java_org_apache_hadoop_crypto_OpensslCipher_doFinal
   
   int output_len = 0;
   if (!dlsym_EVP_CipherFinal_ex(context, output_bytes, &output_len)) {
+#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
+dlsym_EVP_CIPHER_CTX_reset(context);
+#else
 dlsym_EVP_CIPHER_CTX_cleanup(context);
+#endif
 THROW(env, "java/lang/InternalError", "Error in EVP_CipherFinal_ex.");
 return 0;
   }
@@ -396,6 +418,16 @@ JNIEXPORT jstring JNICALL 
Java_org_apache_hadoop_crypto_OpensslCipher_getLibrary
 (JNIEnv *env, jclass clazz) 
 {
 #ifdef UNIX
+#if OPENSSL_API_COMPAT < 0x10100000L && OPENSSL_VERSION_NUMBER >= 0x10100000L
+  if (dlsym_EVP_CIPHER_CTX_reset) {
+Dl_info dl_info;
+if(dladdr(
+dlsym_EVP_CIPHER_CTX_reset,
+&dl_info)) {
+  return (*env)->NewStringUTF(env, dl_info.dli_fname);
+}
+  

[07/14] hadoop git commit: HDFS-11043. TestWebHdfsTimeouts fails. Contributed by Xiaoyu Yao and Chao Sun.

2018-04-13 Thread arp
HDFS-11043. TestWebHdfsTimeouts fails. Contributed by Xiaoyu Yao and Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/867135af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/867135af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/867135af

Branch: refs/heads/branch-3.1
Commit: 867135af28af5065db0f8f73707efa3c7672da73
Parents: 56acbe4
Author: Xiaoyu Yao 
Authored: Wed Mar 21 13:53:35 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:14:47 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/867135af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 67c39e1..4743821 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -66,10 +66,10 @@ public class TestWebHdfsTimeouts {
 
   private static final Log LOG = LogFactory.getLog(TestWebHdfsTimeouts.class);
 
-  private static final int CLIENTS_TO_CONSUME_BACKLOG = 100;
+  private static final int CLIENTS_TO_CONSUME_BACKLOG = 129;
   private static final int CONNECTION_BACKLOG = 1;
-  private static final int SHORT_SOCKET_TIMEOUT = 5;
-  private static final int TEST_TIMEOUT = 10000;
+  private static final int SHORT_SOCKET_TIMEOUT = 200;
+  private static final int TEST_TIMEOUT = 100000;
 
  private List<SocketChannel> clients;
   private WebHdfsFileSystem fs;
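The new values make the test's timing assumptions explicit: the client list must be large enough to exhaust the server's accept backlog, and the socket timeout must be long enough to avoid flaky failures while still keeping the test fast. A self-contained sketch of the backlog-exhaustion technique the test relies on (class name and constants here are illustrative, not from the patch, and the exact behavior of a full accept queue is OS-dependent):

  import java.net.InetSocketAddress;
  import java.net.ServerSocket;
  import java.net.Socket;
  import java.net.SocketTimeoutException;
  import java.util.ArrayList;
  import java.util.List;

  public class BacklogDemo {
    public static void main(String[] args) throws Exception {
      // A server that never calls accept(): its backlog (here 1) fills fast.
      try (ServerSocket server = new ServerSocket(0, 1)) {
        List<Socket> clients = new ArrayList<>();
        try {
          // Each connect consumes a pending-connection slot; once the OS
          // queue is full, further attempts stall until the 200 ms timeout.
          for (int i = 0; i < 129; i++) {
            Socket s = new Socket();
            s.connect(
                new InetSocketAddress("127.0.0.1", server.getLocalPort()), 200);
            clients.add(s);
          }
          System.out.println("backlog never filled on this OS");
        } catch (SocketTimeoutException expected) {
          System.out.println("backlog full after " + clients.size() + " clients");
        } finally {
          for (Socket s : clients) {
            s.close();
          }
        }
      }
    }
  }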


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/14] hadoop git commit: HDFS-13315. Add a test for the issue reported in HDFS-11481 which is fixed by HDFS-10997. Contributed by Yongjun Zhang.

2018-04-13 Thread arp
HDFS-13315. Add a test for the issue reported in HDFS-11481 which is fixed by 
HDFS-10997. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56acbe47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56acbe47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56acbe47

Branch: refs/heads/branch-3.1
Commit: 56acbe476b6659ee4e3f53fdb5888906625950c8
Parents: 2b46bd3
Author: Yongjun Zhang 
Authored: Tue Mar 20 23:00:39 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:14:24 2018 -0700

--
 .../hdfs/tools/snapshot/SnapshotDiff.java   |  1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 37 ++
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 41 
 .../snapshot/TestSnapshotDiffReport.java| 25 +---
 4 files changed, 80 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56acbe47/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
index 3838ca1..1237099 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
@@ -106,6 +106,7 @@ public class SnapshotDiff extends Configured implements 
Tool {
 } catch (IOException e) {
   String[] content = e.getLocalizedMessage().split("\n");
   System.err.println("snapshotDiff: " + content[0]);
+  e.printStackTrace(System.err);
   return 1;
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56acbe47/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 1411a7f..4f9f260 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -121,6 +121,9 @@ import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -2354,4 +2357,38 @@ public class DFSTestUtil {
 }
 return closedFiles;
   }
+
+  /**
+   * Check the correctness of the snapshotDiff report.
+   * Make sure all items in the passed entries are in the snapshotDiff
+   * report.
+   */
+  public static void verifySnapshotDiffReport(DistributedFileSystem fs,
+  Path dir, String from, String to,
+  DiffReportEntry... entries) throws IOException {
+SnapshotDiffReport report = fs.getSnapshotDiffReport(dir, from, to);
+// reverse the order of from and to
+SnapshotDiffReport inverseReport = fs
+.getSnapshotDiffReport(dir, to, from);
+LOG.info(report.toString());
+LOG.info(inverseReport.toString() + "\n");
+
+assertEquals(entries.length, report.getDiffList().size());
+assertEquals(entries.length, inverseReport.getDiffList().size());
+
+for (DiffReportEntry entry : entries) {
+  if (entry.getType() == DiffType.MODIFY) {
+assertTrue(report.getDiffList().contains(entry));
+assertTrue(inverseReport.getDiffList().contains(entry));
+  } else if (entry.getType() == DiffType.DELETE) {
+assertTrue(report.getDiffList().contains(entry));
+assertTrue(inverseReport.getDiffList().contains(
new DiffReportEntry(DiffType.CREATE, entry.getSourcePath())));
+  } else if (entry.getType() == DiffType.CREATE) {
+assertTrue(report.getDiffList().contains(entry));
+assertTrue(inverseReport.getDiffList().contains(
new DiffReportEntry(DiffType.DELETE, entry.getSourcePath())));
+  }
+}
+  }
 }
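As a usage note, a hypothetical call site for the new helper might look like the following. The filesystem, directory, snapshot names, and file names are illustrative; only the helper and the DiffReportEntry/DiffType types come from the patch:

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DFSTestUtil;
  import org.apache.hadoop.hdfs.DFSUtil;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
  import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;

  public class SnapshotDiffCheck {
    // Hypothetical: snapshots "s1" and "s2" bracket deleting old.txt and
    // creating new.txt under dir; the helper also checks the inverse report,
    // where the DELETE shows up as a CREATE and vice versa.
    static void checkDiff(DistributedFileSystem dfs, Path dir) throws Exception {
      DFSTestUtil.verifySnapshotDiffReport(dfs, dir, "s1", "s2",
          new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
          new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("old.txt")),
          new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("new.txt")));
    }
  }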

http://git-wip-us.apache.org/rep

[01/14] hadoop git commit: HADOOP-14667. Flexible Visual Studio support. Contributed by Allen Wittenauer

2018-04-13 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 8df8cad01 -> 994c7d66e


HADOOP-14667. Flexible Visual Studio support. Contributed by Allen Wittenauer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffc39ec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffc39ec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffc39ec3

Branch: refs/heads/branch-3.1
Commit: ffc39ec3ab7939adfc4464372960251d32eb6435
Parents: 8df8cad
Author: Chris Douglas 
Authored: Mon Mar 19 16:05:55 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:11:35 2018 -0700

--
 BUILDING.txt| 30 
 dev-support/bin/win-vs-upgrade.cmd  | 39 
 dev-support/win-paths-eg.cmd| 49 
 hadoop-common-project/hadoop-common/pom.xml | 28 +++
 .../src/main/native/native.vcxproj  |  2 +
 .../hadoop-hdfs-native-client/pom.xml   |  5 +-
 6 files changed, 128 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffc39ec3/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 6c266e5..6d752d4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -348,7 +348,7 @@ Requirements:
 * Maven 3.0 or later
 * ProtocolBuffer 2.5.0
 * CMake 3.1 or newer
-* Windows SDK 7.1 or Visual Studio 2010 Professional
+* Visual Studio 2010 Professional or Higher
 * Windows SDK 8.1 (if building CPU rate control for the container executor)
 * zlib headers (if building native code bindings for zlib)
 * Internet connection for first build (to fetch all Maven and Hadoop 
dependencies)
@@ -359,18 +359,15 @@ Requirements:
 Unix command-line tools are also included with the Windows Git package which
 can be downloaded from http://git-scm.com/downloads
 
-If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
+If using Visual Studio, it must be Professional level or higher.
 Do not use Visual Studio Express.  It does not support compiling for 64-bit,
-which is problematic if running a 64-bit system.  The Windows SDK 7.1 is free 
to
-download here:
-
-http://www.microsoft.com/en-us/download/details.aspx?id=8279
+which is problematic if running a 64-bit system.
 
 The Windows SDK 8.1 is available to download at:
 
 http://msdn.microsoft.com/en-us/windows/bg162891.aspx
 
-Cygwin is neither required nor supported.
+Cygwin is not required.
 
 
--
 Building:
@@ -378,21 +375,12 @@ Building:
 Keep the source code tree in a short path to avoid running into problems 
related
 to Windows maximum path length limitation (for example, C:\hdc).
 
-Run builds from a Windows SDK Command Prompt. (Start, All Programs,
-Microsoft Windows SDK v7.1, Windows SDK 7.1 Command Prompt).
-
-JAVA_HOME must be set, and the path must not contain spaces. If the full path
-would contain spaces, then use the Windows short path instead.
-
-You must set the Platform environment variable to either x64 or Win32 depending
-on whether you're running a 64-bit or 32-bit system. Note that this is
-case-sensitive. It must be "Platform", not "PLATFORM" or "platform".
-Environment variables on Windows are usually case-insensitive, but Maven treats
-them as case-sensitive. Failure to set this environment variable correctly will
-cause msbuild to fail while building the native code in hadoop-common.
+There is one support command file located in dev-support called 
win-paths-eg.cmd.
+It should be copied somewhere convenient and modified to fit your needs.
 
-set Platform=x64 (when building on a 64-bit system)
-set Platform=Win32 (when building on a 32-bit system)
+win-paths-eg.cmd sets up the environment for use. You will need to modify this
+file. It will put all of the required components in the command path,
+configure the bit-ness of the build, and set several optional components.
 
 Several tests require that the user must have the Create Symbolic Links
 privilege.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffc39ec3/dev-support/bin/win-vs-upgrade.cmd
--
diff --git a/dev-support/bin/win-vs-upgrade.cmd 
b/dev-support/bin/win-vs-upgrade.cmd
new file mode 100644
index 0000000..d8c9d73
--- /dev/null
+++ b/dev-support/bin/win-vs-upgrade.cmd
@@ -0,0 +1,39 @@
+@ECHO OFF
+@REM Licensed to the Apache Software Foundation (ASF) under one or more
+@REM contributor license agreements.  See the NOTICE file distributed with
+@REM this work for additional information regarding copyright ownership.
+@REM The ASF licenses this file to You under the Apache License, Version 2.0
+@REM (the "L

[04/14] hadoop git commit: HADOOP-15331. Fix a race condition causing parsing error of java.io.BufferedInputStream in class org.apache.hadoop.conf.Configuration. Contributed by Miklos Szegedi.

2018-04-13 Thread arp
HADOOP-15331. Fix a race condition causing parsing error of 
java.io.BufferedInputStream in class org.apache.hadoop.conf.Configuration. 
Contributed by Miklos Szegedi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6891da1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6891da1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6891da1e

Branch: refs/heads/branch-3.1
Commit: 6891da1e750118f00aca58a2426d085055d8efc5
Parents: 922a0db
Author: Yufei Gu 
Authored: Thu Mar 22 11:04:37 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:13:34 2018 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   |  5 +++-
 .../apache/hadoop/conf/TestConfiguration.java   | 31 
 2 files changed, 35 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6891da1e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 0b2196b..a69e4c4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -816,8 +816,11 @@ public class Configuration implements 
Iterable<Map.Entry<String,String>>,
*/
   @SuppressWarnings("unchecked")
   public Configuration(Configuration other) {
-this.resources = (ArrayList) other.resources.clone();
 synchronized(other) {
+  // Make sure we clone a finalized state
+  // Resources like input streams can be processed only once
+  other.getProps();
+  this.resources = (ArrayList) other.resources.clone();
   if (other.properties != null) {
 this.properties = (Properties)other.properties.clone();
   }
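The crux of the fix is ordering: other.getProps() is forced inside the synchronized block, so any lazily-added stream resources are parsed exactly once before resources and properties are copied. A stripped-down sketch of the same pattern (illustrative, not the Hadoop code):

  import java.io.IOException;
  import java.io.InputStream;
  import java.util.Properties;

  // A lazily-consumed resource must be materialized before its holder is
  // copied, or both copies end up trying to drain one one-shot stream.
  class LazyHolder {
    private InputStream pending;   // one-shot resource, like an added stream
    private Properties props;      // materialized state

    synchronized Properties getProps() throws IOException {
      if (props == null) {
        props = new Properties();
        props.load(pending);       // consumes the stream exactly once
        pending = null;
      }
      return props;
    }

    synchronized LazyHolder copy() throws IOException {
      getProps();                  // force the parse before copying state,
      LazyHolder c = new LazyHolder();   // mirroring the Configuration fix
      c.props = (Properties) props.clone();
      return c;
    }
  }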

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6891da1e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 265e007..c8e5f17 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.conf;
 
+import java.io.BufferedInputStream;
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -2433,4 +2434,34 @@ public class TestConfiguration {
   System.setOut(output);
 }
   }
+
+  /**
+   * Test race conditions between clone() and getProps().
+   * Test for race conditions in the way Hadoop handles the Configuration
+   * class. The scenario is the following. Let's assume that there are two
+   * threads sharing the same Configuration class. One adds some resources
+   * to the configuration, while the other one clones it. Resources are
+   * loaded lazily in a deferred call to loadResources(). If the cloning
+   * happens after adding the resources but before parsing them, some temporary
+   * resources like input stream pointers are cloned. Eventually both copies
+   * will load the same input stream resources.
+   * One parses the input stream XML and closes it updating it's own copy of
+   * the resource. The other one has another pointer to the same input stream.
+   * When it tries to load it, it will crash with a stream closed exception.
+   */
+  @Test
+  public void testResourceRace() {
+InputStream is =
+new BufferedInputStream(new ByteArrayInputStream(
+"<configuration/>".getBytes()));
+Configuration config = new Configuration();
+// Thread 1
+config.addResource(is);
+// Thread 2
+Configuration confClone = new Configuration(conf);
+// Thread 2
+confClone.get("firstParse");
+// Thread 1
+config.get("secondParse");
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/14] hadoop git commit: YARN-8048. Support auto-spawning of admin configured services during bootstrap of RM (Rohith Sharma K S via wangda)

2018-04-13 Thread arp
YARN-8048. Support auto-spawning of admin configured services during bootstrap 
of RM (Rohith Sharma K S via wangda)

Change-Id: I2d8d61ccad55e1118009294d7e17822df3cd0fd5


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7031a853
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7031a853
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7031a853

Branch: refs/heads/branch-3.1
Commit: 7031a853f4fee3bd097ab5813006a1953c06b6cf
Parents: d98c3ca
Author: Wangda Tan 
Authored: Fri Apr 6 21:24:58 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:18:48 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../hadoop-yarn-services-api/pom.xml|   5 +
 .../client/SystemServiceManagerImpl.java| 381 +++
 .../service/client/TestSystemServiceImpl.java   | 180 +
 .../users/sync/user1/example-app1.yarnfile  |  16 +
 .../users/sync/user1/example-app2.yarnfile  |  16 +
 .../users/sync/user1/example-app3.json  |  16 +
 .../users/sync/user2/example-app1.yarnfile  |  16 +
 .../users/sync/user2/example-app2.yarnfile  |  16 +
 .../yarn/service/conf/YarnServiceConf.java  |   2 +
 .../yarn/service/TestSystemServiceManager.java  | 156 
 .../server/service/SystemServiceManager.java|  25 ++
 .../yarn/server/service/package-info.java   |  27 ++
 .../server/resourcemanager/ResourceManager.java |  30 +-
 14 files changed, 889 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7031a853/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9898cba..58c288b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -341,6 +341,10 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_API_SERVICES_ENABLE = "yarn."
   + "webapp.api-service.enable";
 
+  @Private
+  public static final String DEFAULT_YARN_API_SYSTEM_SERVICES_CLASS =
+  "org.apache.hadoop.yarn.service.client.SystemServiceManagerImpl";
+
   public static final String RM_RESOURCE_TRACKER_ADDRESS =
 RM_PREFIX + "resource-tracker.address";
   public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7031a853/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
index 0dfa92d..d45da09 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
@@ -71,6 +71,7 @@
 
           <excludes>
             <exclude>**/*.json</exclude>
+            <exclude>**/*.yarnfile</exclude>
           </excludes>
 
   
@@ -96,6 +97,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7031a853/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
new file mode 100644
index 0000000..225f8bd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * T

[03/14] hadoop git commit: HADOOP-15330. Remove jdk1.7 profile from hadoop-annotations module

2018-04-13 Thread arp
HADOOP-15330. Remove jdk1.7 profile from hadoop-annotations module

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/922a0db1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/922a0db1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/922a0db1

Branch: refs/heads/branch-3.1
Commit: 922a0db13c16632baae2d7906082e625445bfe2a
Parents: 9014f98
Author: fang zhenyi 
Authored: Wed Mar 21 14:01:26 2018 +0900
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:12:27 2018 -0700

--
 hadoop-common-project/hadoop-annotations/pom.xml | 15 ---
 1 file changed, 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/922a0db1/hadoop-common-project/hadoop-annotations/pom.xml
--
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml 
b/hadoop-common-project/hadoop-annotations/pom.xml
index af9f183..c11d9ba 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -39,21 +39,6 @@
 
   <profiles>
     <profile>
-      <id>jdk1.7</id>
-      <activation>
-        <jdk>1.7</jdk>
-      </activation>
-      <dependencies>
-        <dependency>
-          <groupId>jdk.tools</groupId>
-          <artifactId>jdk.tools</artifactId>
-          <version>1.7</version>
-          <scope>system</scope>
-          <systemPath>${java.home}/../lib/tools.jar</systemPath>
-        </dependency>
-      </dependencies>
-    </profile>
-    <profile>
       <id>jdk1.8</id>
       <activation>
         <jdk>1.8</jdk>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[14/14] hadoop git commit: YARN-8091. Revisit checkUserAccessToQueue RM REST API. (wangda)

2018-04-13 Thread arp
YARN-8091. Revisit checkUserAccessToQueue RM REST API. (wangda)

Change-Id: I5fab3fe229c34e967487b7327c7b3c8ddf7cb795


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/994c7d66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/994c7d66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/994c7d66

Branch: refs/heads/branch-3.1
Commit: 994c7d66e02ef27db234bece49526c7a4c08f3b2
Parents: 7031a85
Author: Wangda Tan 
Authored: Mon Apr 2 15:22:05 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:19:10 2018 -0700

--
 .../webapp/RMWebServiceProtocol.java|  3 +-
 .../resourcemanager/webapp/RMWebServices.java   | 40 ++--
 .../webapp/dao/RMQueueAclInfo.java  | 65 
 .../webapp/TestRMWebServices.java   | 52 +---
 .../webapp/DefaultRequestInterceptorREST.java   |  5 +-
 .../webapp/FederationInterceptorREST.java   |  3 +-
 .../server/router/webapp/RouterWebServices.java |  3 +-
 .../webapp/MockRESTRequestInterceptor.java  |  5 +-
 .../PassThroughRESTRequestInterceptor.java  |  3 +-
 9 files changed, 127 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/994c7d66/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
index 423c4e1..85ea07d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
@@ -53,6 +53,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.RMQueueAclInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo;
@@ -673,7 +674,7 @@ public interface RMWebServiceProtocol {
* @throws AuthorizationException if the user is not authorized to invoke 
this
*method.
*/
-  Response checkUserAccessToQueue(String queue, String username,
+  RMQueueAclInfo checkUserAccessToQueue(String queue, String username,
   String queueAclType, HttpServletRequest hsr)
   throws AuthorizationException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/994c7d66/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index c40e8be..d30764d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -173,6 +173,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntr
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;

[10/14] hadoop git commit: YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. (Xuan Gong via wangda)

2018-04-13 Thread arp
YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. 
(Xuan Gong via wangda)

Change-Id: Ied37ff11e507fc86847753ba79486652c8fadfe9


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9623714
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9623714
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9623714

Branch: refs/heads/branch-3.1
Commit: c962371430b8957500397fe71e944c953f46ed7d
Parents: 077eda6
Author: Wangda Tan 
Authored: Fri Apr 6 21:25:57 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:17:47 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +
 .../containermanager/AuxServices.java   | 160 +-
 .../containermanager/ContainerManagerImpl.java  |   3 +-
 .../containermanager/TestAuxServices.java   | 167 +--
 4 files changed, 313 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9623714/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a2cc9b7..9898cba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2086,6 +2086,9 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_AUX_SERVICES_CLASSPATH =
   NM_AUX_SERVICES + ".%s.classpath";
 
+  public static final String NM_AUX_SERVICE_REMOTE_CLASSPATH =
+  NM_AUX_SERVICES + ".%s.remote-classpath";
+
   public static final String NM_AUX_SERVICES_SYSTEM_CLASSES =
   NM_AUX_SERVICES + ".%s.system-classes";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9623714/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index 57cca50..c8b7a76 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 
+import java.io.IOException;
+import java.net.URI;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
@@ -29,45 +31,70 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.ServiceStateChangeListener;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
 import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
 import org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler;
 import org.apache.hadoop.yarn.server.api.AuxiliaryService;
 import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
 import org.apache.hadoop.yarn.server.ap
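As a usage sketch: the new key is a format template (note the %s service name). Wiring a hypothetical auxiliary service to a JAR kept on HDFS might look like the following; the service name and HDFS path are made up for illustration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class RemoteAuxServiceConfig {
    public static Configuration build() {
      Configuration conf = new YarnConfiguration();
      conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, "my_aux_service");
      // Point the service's classpath at a JAR in HDFS (path is illustrative);
      // the NodeManager can then localize it instead of requiring the JAR to
      // be pre-installed on every node.
      conf.set(
          String.format(YarnConfiguration.NM_AUX_SERVICE_REMOTE_CLASSPATH,
              "my_aux_service"),
          "hdfs://nn.example.com:8020/yarn/aux/my-aux-service.jar");
      return conf;
    }
  }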

[11/14] hadoop git commit: YARN-8028. Support authorizeUserAccessToQueue in RMWebServices. Contributed by Wangda Tan.

2018-04-13 Thread arp
YARN-8028. Support authorizeUserAccessToQueue in RMWebServices. Contributed by 
Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21717db6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21717db6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21717db6

Branch: refs/heads/branch-3.1
Commit: 21717db6a02b4e619033bfab8bd82a0f52c892c4
Parents: c962371
Author: Sunil G 
Authored: Sun Mar 18 11:00:30 2018 +0530
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:18:12 2018 -0700

--
 .../security/QueueACLsManager.java  |  1 -
 .../resourcemanager/webapp/RMWSConsts.java  |  8 ++
 .../webapp/RMWebServiceProtocol.java| 18 +
 .../resourcemanager/webapp/RMWebServices.java   | 55 -
 .../webapp/TestRMWebServices.java   | 84 
 .../webapp/DefaultRequestInterceptorREST.java   |  9 +++
 .../webapp/FederationInterceptorREST.java   |  6 ++
 .../server/router/webapp/RouterWebServices.java | 17 
 .../webapp/MockRESTRequestInterceptor.java  |  6 ++
 .../PassThroughRESTRequestInterceptor.java  |  8 ++
 10 files changed, 208 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21717db6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java
index 530cb25..4c22a55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java
@@ -114,7 +114,6 @@ public class QueueACLsManager {
 // version is added for the moving the application case. The check has
 // extra logging to distinguish between the queue not existing in the
 // application move request case and the real access denied case.
-
 if (scheduler instanceof CapacityScheduler) {
   CSQueue queue = ((CapacityScheduler) scheduler).getQueue(targetQueue);
   if (queue == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21717db6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
index 5a945da..29ae81b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -174,6 +174,12 @@ public final class RMWSConsts {
   public static final String GET_CONTAINER =
   "/apps/{appid}/appattempts/{appattemptid}/containers/{containerid}";
 
+  /**
   * Path for {@code checkUserAccessToQueue}.
+   */
+  public static final String CHECK_USER_ACCESS_TO_QUEUE =
+  "/queues/{queue}/access";
+
   // QueryParams for RMWebServiceProtocol
 
   public static final String TIME = "time";
@@ -183,6 +189,7 @@ public final class RMWSConsts {
   public static final String FINAL_STATUS = "finalStatus";
   public static final String USER = "user";
   public static final String QUEUE = "queue";
+  public static final String QUEUES = "queues";
   public static final String LIMIT = "limit";
   public static final String STARTED_TIME_BEGIN = "startedTimeBegin";
   public static final String STARTED_TIME_END = "startedTimeEnd";
@@ -209,6 +216,7 @@ public final class RMWSConsts {
   public static final String GET_LABELS = "get-labels";
   public static final String DESELECTS = "deSelects";
   public sta
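For orientation: the endpoint added above hangs off the RM webapp at the CHECK_USER_ACCESS_TO_QUEUE path. A hedged sketch of calling it over HTTP follows; the host, port, and the "queue-acl-type" query parameter name are assumptions for illustration (only the path and the "user" parameter appear in this diff):

  import java.net.HttpURLConnection;
  import java.net.URL;

  public class QueueAccessProbe {
    public static void main(String[] args) throws Exception {
      // Path segment from RMWSConsts.CHECK_USER_ACCESS_TO_QUEUE; the
      // acl-type parameter name here is assumed, not confirmed by the diff.
      URL url = new URL("http://rm.example.com:8088/ws/v1/cluster"
          + "/queues/default/access?user=alice&queue-acl-type=SUBMIT_APPLICATIONS");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("GET");
      System.out.println("HTTP " + conn.getResponseCode());
    }
  }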

[12/14] hadoop git commit: YARN-8040. [UI2] New YARN UI webapp does not respect current pathname for REST api. Contributed by Sunil G.

2018-04-13 Thread arp
YARN-8040. [UI2] New YARN UI webapp does not respect current pathname for REST 
api. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d98c3ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d98c3ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d98c3ca5

Branch: refs/heads/branch-3.1
Commit: d98c3ca5fd8795eb10fe3e68215934f083ab1135
Parents: 21717db
Author: Sunil G 
Authored: Sun Mar 18 10:44:33 2018 +0530
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:18:39 2018 -0700

--
 .../hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d98c3ca5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index d047ed1..83df971 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -40,7 +40,7 @@ function getTimeLineV1URL(rmhost) {
 
 function updateConfigs(application) {
   var hostname = window.location.hostname;
-  var rmhost = hostname + (window.location.port ? ':' + window.location.port: 
'');
+  var rmhost = hostname + (window.location.port ? ':' + window.location.port: 
'') + skipTrailingSlash(window.location.pathname);
 
   if(!ENV.hosts.rmWebAddress) {
 ENV.hosts.rmWebAddress = rmhost;
@@ -130,3 +130,8 @@ export default {
   before: 'env',
   initialize
 };
+
+const skipTrailingSlash = function(path) {
+  path = path.replace('ui2/', '');
+  return path.replace(/\/$/, '');
+};


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/14] hadoop git commit: HADOOP-15332. Fix typos in hadoop-aws markdown docs. Contributed by Gabor Bota.

2018-04-13 Thread arp
HADOOP-15332. Fix typos in hadoop-aws markdown docs. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b46bd3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b46bd3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b46bd3f

Branch: refs/heads/branch-3.1
Commit: 2b46bd3f4548fefe292e9fedffe2fdf7f2f12be7
Parents: 6891da1
Author: Sean Mackrory 
Authored: Tue Mar 20 21:11:51 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:13:56 2018 -0700

--
 .../tools/hadoop-aws/committer_architecture.md  | 76 ++--
 .../markdown/tools/hadoop-aws/committers.md | 18 ++---
 .../markdown/tools/hadoop-aws/encryption.md |  8 +--
 .../src/site/markdown/tools/hadoop-aws/index.md | 34 -
 .../site/markdown/tools/hadoop-aws/s3guard.md   |  4 +-
 .../site/markdown/tools/hadoop-aws/testing.md   | 20 +++---
 .../tools/hadoop-aws/troubleshooting_s3a.md |  2 +-
 7 files changed, 81 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b46bd3f/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
index b974ea8..e4ba75d 100644
--- 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
+++ 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/committer_architecture.md
@@ -28,7 +28,7 @@ The standard commit algorithms (the `FileOutputCommitter` and 
its v1 and v2 algo
 rely on directory rename being an `O(1)` atomic operation: callers output their
 work to temporary directories in the destination filesystem, then
 rename these directories to the final destination as way of committing work.
-This is the perfect solution for commiting work against any filesystem with
+This is the perfect solution for committing work against any filesystem with
 consistent listing operations and where the `FileSystem.rename()` command
 is an atomic `O(1)` operation.
 
@@ -60,7 +60,7 @@ delayed completion of multi-part PUT operations
 That is: tasks write all data as multipart uploads, *but delay the final
 commit action until until the final, single job commit action.* Only that
 data committed in the job commit action will be made visible; work from 
speculative
-and failed tasks will not be instiantiated. As there is no rename, there is no
+and failed tasks will not be instantiated. As there is no rename, there is no
 delay while data is copied from a temporary directory to the final directory.
 The duration of the commit will be the time needed to determine which commit 
operations
 to construct, and to execute them.
@@ -109,7 +109,7 @@ This is traditionally implemented via a 
`FileSystem.rename()` call.
 
   It is useful to differentiate between a *task-side commit*: an operation 
performed
   in the task process after its work, and a *driver-side task commit*, in which
-  the Job driver perfoms the commit operation. Any task-side commit work will
+  the Job driver performs the commit operation. Any task-side commit work will
   be performed across the cluster, and may take place off the critical part for
   job execution. However, unless the commit protocol requires all tasks to 
await
   a signal from the job driver, task-side commits cannot instantiate their 
output
@@ -241,7 +241,7 @@ def commitTask(fs, jobAttemptPath, taskAttemptPath, dest):
 fs.rename(taskAttemptPath, taskCommittedPath)
 ```
 
-On a genuine fileystem this is an `O(1)` directory rename.
+On a genuine filesystem this is an `O(1)` directory rename.
 
 On an object store with a mimiced rename, it is `O(data)` for the copy,
 along with overhead for listing and deleting all files (For S3, that's
@@ -257,13 +257,13 @@ def abortTask(fs, jobAttemptPath, taskAttemptPath, dest):
   fs.delete(taskAttemptPath, recursive=True)
 ```
 
-On a genuine fileystem this is an `O(1)` operation. On an object store,
+On a genuine filesystem this is an `O(1)` operation. On an object store,
 proportional to the time to list and delete files, usually in batches.
 
 
 ### Job Commit
 
-Merge all files/directories in all task commited paths into final destination 
path.
+Merge all files/directories in all task committed paths into final destination 
path.
 Optionally; create 0-byte `_SUCCESS` file in destination path.
 
 ```python
@@ -420,9 +420,9 @@ by renaming the files.
 A a key difference is that the v1 algorithm commits a source directory to
 via a directory rename, which is traditionally an `O(1)` operation.
 
-In constrast, the v2 algorithm lists al

[08/14] hadoop git commit: HDFS-11900. Hedged reads thread pool creation not synchronized. Contributed by John Zhuge.

2018-04-13 Thread arp
HDFS-11900. Hedged reads thread pool creation not synchronized. Contributed by 
John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/696d7860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/696d7860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/696d7860

Branch: refs/heads/branch-3.1
Commit: 696d786063878509e514829357e3838f4f9377a8
Parents: 867135a
Author: Wei-Chiu Chuang 
Authored: Thu Mar 22 11:29:31 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:16:23 2018 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/696d7860/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index af7b540..0875328 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2910,7 +2910,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* @param num Number of threads for hedged reads thread pool.
* If zero, skip hedged reads thread pool creation.
*/
-  private synchronized void initThreadsNumForHedgedReads(int num) {
+  private static synchronized void initThreadsNumForHedgedReads(int num) {
 if (num <= 0 || HEDGED_READ_THREAD_POOL != null) return;
 HEDGED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/14] hadoop git commit: MAPREDUCE-7066. TestQueue fails on Java9

2018-04-13 Thread arp
MAPREDUCE-7066. TestQueue fails on Java9

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/077eda66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/077eda66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/077eda66

Branch: refs/heads/branch-3.1
Commit: 077eda66ade5c552bbf41dd0de70b49dafb957be
Parents: 696d7860
Author: Takanobu Asanuma 
Authored: Mon Mar 19 09:39:49 2018 +0900
Committer: Arpit Agarwal 
Committed: Fri Apr 13 10:17:34 2018 -0700

--
 .../src/test/java/org/apache/hadoop/mapred/TestQueue.java | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/077eda66/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java
index 796bbee..b2908bf 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java
@@ -153,9 +153,10 @@ public class TestQueue {
   writer = new StringWriter();
   QueueManager.dumpConfiguration(writer, conf);
   result = writer.toString();
-  assertEquals(
-  
"{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\"
 \",\"acl_administer_jobs\":\" 
\",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\"
 \",\"acl_administer_jobs\":\" 
\",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}",
-  result);
+  
assertTrue(result.contains("{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\"
 \",\"acl_administer_jobs\":\" 
\",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\"
 \",\"acl_administer_jobs\":\" \",\"properties\":["));
+  assertTrue(result.contains("{\"key\":\"capacity\",\"value\":\"20\"}"));
+  assertTrue(result.contains("{\"key\":\"user-limit\",\"value\":\"30\"}"));
+  assertTrue(result.contains("],\"children\":[]}]}]}"));
   // test constructor QueueAclsInfo
   QueueAclsInfo qi = new QueueAclsInfo();
   assertNull(qi.getQueueName());
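The rewritten assertions pass on Java 9 because they no longer depend on the order in which properties are serialized into the dumped JSON, only on stable fragments. A small illustrative helper capturing the same idea (not part of the patch):

  import org.junit.Assert;

  public class JsonAsserts {
    // Assert every expected fragment appears somewhere in the JSON,
    // regardless of the serialization order (which JDK 9 changed).
    static void assertContainsAll(String json, String... fragments) {
      for (String fragment : fragments) {
        Assert.assertTrue("missing fragment: " + fragment,
            json.contains(fragment));
      }
    }
  }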


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor Bota.

2018-04-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 994c7d66e -> ca8bb322b


HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor 
Bota.

(cherry picked from commit e66e287efe2b43e710137a628f03c7df3ebdf498)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca8bb322
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca8bb322
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca8bb322

Branch: refs/heads/branch-3.1
Commit: ca8bb322be5e21ce00bb49cecfc4f515b49d2a37
Parents: 994c7d6
Author: Wei-Chiu Chuang 
Authored: Fri Apr 13 09:17:34 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Apr 13 12:18:49 2018 -0700

--
 .../hdfs/shortcircuit/ShortCircuitCache.java| 11 ++---
 .../shortcircuit/TestShortCircuitCache.java | 26 
 2 files changed, 33 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca8bb322/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index b26652b..c2f0350 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -664,6 +664,7 @@ public class ShortCircuitCache implements Closeable {
 unref(replica);
   }
 
+  static final int FETCH_OR_CREATE_RETRY_TIMES = 3;
   /**
* Fetch or create a replica.
*
@@ -678,11 +679,11 @@ public class ShortCircuitCache implements Closeable {
*/
   public ShortCircuitReplicaInfo fetchOrCreate(ExtendedBlockId key,
   ShortCircuitReplicaCreator creator) {
-Waitable<ShortCircuitReplicaInfo> newWaitable = null;
+Waitable<ShortCircuitReplicaInfo> newWaitable;
 lock.lock();
 try {
   ShortCircuitReplicaInfo info = null;
-  do {
+  for (int i = 0; i < FETCH_OR_CREATE_RETRY_TIMES; i++){
 if (closed) {
   LOG.trace("{}: can't fethchOrCreate {} because the cache is closed.",
   this, key);
@@ -692,11 +693,12 @@ public class ShortCircuitCache implements Closeable {
 if (waitable != null) {
   try {
 info = fetch(key, waitable);
+break;
   } catch (RetriableException e) {
 LOG.debug("{}: retrying {}", this, e.getMessage());
   }
 }
-  } while (false);
+  }
   if (info != null) return info;
   // We need to load the replica ourselves.
   newWaitable = new Waitable<>(lock.newCondition());
@@ -717,7 +719,8 @@ public class ShortCircuitCache implements Closeable {
*
* @throws RetriableException   If the caller needs to retry.
*/
-  private ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
+  @VisibleForTesting // ONLY for testing
+  protected ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
  Waitable<ShortCircuitReplicaInfo> waitable) throws RetriableException {
 // Another thread is already in the process of loading this
 // ShortCircuitReplica.  So we simply wait for it to complete.
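The bug was structural: do { ... } while (false) executes exactly once, so a RetriableException aborted the loop instead of retrying. The fix bounds the retries with a for loop. The general shape, reduced to an illustrative sketch (not the Hadoop code):

  import java.util.concurrent.Callable;

  public class BoundedRetry {
    // Retry a fetch a fixed number of times, breaking out on success; if
    // every attempt throws, fall through with null so the caller creates
    // the value itself, mirroring fetchOrCreate's contract.
    static <T> T fetchWithRetries(Callable<T> fetch, int times) {
      T result = null;
      for (int i = 0; i < times; i++) {
        try {
          result = fetch.call();
          break; // success: stop retrying
        } catch (Exception retriable) {
          // swallow and retry; the real code logs the RetriableException
        }
      }
      return result;
    }
  }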

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca8bb322/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7ba0edc..5da6a25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.ShortCircuitReplica
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -793,4 +794,29 @@ public class TestShortCircuitCache {
 cluster.shutdown();
 sockDir.close();
   }
+
+  @Test
+  public void testFetchOrCreateRetries() throws Exception {
+tr

hadoop git commit: HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor Bota.

2018-04-13 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 bce57a139 -> dc01e323e


HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor 
Bota.

(cherry picked from commit e66e287efe2b43e710137a628f03c7df3ebdf498)
(cherry picked from commit ca8bb322be5e21ce00bb49cecfc4f515b49d2a37)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc01e323
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc01e323
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc01e323

Branch: refs/heads/branch-3.0
Commit: dc01e323ed24c22f9db88af4963654a037f72e75
Parents: bce57a1
Author: Wei-Chiu Chuang 
Authored: Fri Apr 13 09:17:34 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Apr 13 12:20:01 2018 -0700

--
 .../hdfs/shortcircuit/ShortCircuitCache.java| 11 ++---
 .../shortcircuit/TestShortCircuitCache.java | 26 
 2 files changed, 33 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc01e323/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index b26652b..c2f0350 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -664,6 +664,7 @@ public class ShortCircuitCache implements Closeable {
 unref(replica);
   }
 
+  static final int FETCH_OR_CREATE_RETRY_TIMES = 3;
   /**
* Fetch or create a replica.
*
@@ -678,11 +679,11 @@ public class ShortCircuitCache implements Closeable {
*/
   public ShortCircuitReplicaInfo fetchOrCreate(ExtendedBlockId key,
   ShortCircuitReplicaCreator creator) {
-Waitable<ShortCircuitReplicaInfo> newWaitable = null;
+Waitable<ShortCircuitReplicaInfo> newWaitable;
 lock.lock();
 try {
   ShortCircuitReplicaInfo info = null;
-  do {
+  for (int i = 0; i < FETCH_OR_CREATE_RETRY_TIMES; i++){
 if (closed) {
   LOG.trace("{}: can't fethchOrCreate {} because the cache is closed.",
   this, key);
@@ -692,11 +693,12 @@ public class ShortCircuitCache implements Closeable {
 if (waitable != null) {
   try {
 info = fetch(key, waitable);
+break;
   } catch (RetriableException e) {
 LOG.debug("{}: retrying {}", this, e.getMessage());
   }
 }
-  } while (false);
+  }
   if (info != null) return info;
   // We need to load the replica ourselves.
   newWaitable = new Waitable<>(lock.newCondition());
@@ -717,7 +719,8 @@ public class ShortCircuitCache implements Closeable {
*
* @throws RetriableException   If the caller needs to retry.
*/
-  private ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
+  @VisibleForTesting // ONLY for testing
+  protected ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
  Waitable<ShortCircuitReplicaInfo> waitable) throws RetriableException {
 // Another thread is already in the process of loading this
 // ShortCircuitReplica.  So we simply wait for it to complete.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc01e323/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7ba0edc..5da6a25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.ShortCircuitReplica
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -793,4 +794,29 @@ public class TestShortCircuitCache {
 cluster.shutdown();
 sockDir.close();
   }
+
+  @Test
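
For context, a minimal self-contained sketch of the bounded-retry pattern this patch introduces (the class and method names below are hypothetical stand-ins, not the Hadoop sources): the original do { ... } while (false) body ran exactly once, so a RetriableException from fetch() was logged but never actually retried; the fix replaces it with a counted loop that breaks on success.

import java.util.concurrent.atomic.AtomicInteger;

public class BoundedRetrySketch {
  // Mirrors FETCH_OR_CREATE_RETRY_TIMES from the patch.
  static final int RETRY_TIMES = 3;

  // Simulates a fetch that fails transiently on the first two attempts.
  static String fetchOnce(AtomicInteger attempts) throws Exception {
    if (attempts.incrementAndGet() < 3) {
      throw new Exception("replica load still in progress");
    }
    return "replica";
  }

  static String fetchWithRetry() {
    AtomicInteger attempts = new AtomicInteger();
    String info = null;
    for (int i = 0; i < RETRY_TIMES; i++) {
      try {
        info = fetchOnce(attempts);
        break; // success: stop retrying, as the patched fetchOrCreate does
      } catch (Exception e) {
        System.out.println("retrying after: " + e.getMessage());
      }
    }
    return info; // still null here means all retries were exhausted
  }

  public static void main(String[] args) {
    System.out.println("fetched: " + fetchWithRetry());
  }
}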

hadoop git commit: YARN-8142. Improve SIGTERM handling for YARN Service Application Master. Contributed by Billie Rinaldi

2018-04-13 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk e66e287ef -> 9031a76d4


YARN-8142.  Improve SIGTERM handling for YARN Service Application Master.
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9031a76d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9031a76d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9031a76d

Branch: refs/heads/trunk
Commit: 9031a76d447f0c5eaa392144fd17c5b9812e1b20
Parents: e66e287
Author: Eric Yang 
Authored: Fri Apr 13 15:34:33 2018 -0400
Committer: Eric Yang 
Committed: Fri Apr 13 15:34:33 2018 -0400

--
 .../hadoop/yarn/service/ClientAMService.java|  1 +
 .../hadoop/yarn/service/ServiceScheduler.java   | 41 +++
 .../hadoop/yarn/service/ServiceTestUtils.java   | 11 +++
 .../yarn/service/TestYarnNativeServices.java| 71 
 4 files changed, 110 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9031a76d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
index 08c36f4..3d037e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
@@ -125,6 +125,7 @@ public class ClientAMService extends AbstractService
 LOG.info("Stop the service by {}", UserGroupInformation.getCurrentUser());
 context.scheduler.getDiagnostics()
 .append("Stopped by user " + UserGroupInformation.getCurrentUser());
+context.scheduler.setGracefulStop();
 
 // Stop the service in 2 seconds delay to make sure this rpc call is 
completed.
 // shutdown hook will be executed which will stop AM gracefully.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9031a76d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 0fcca16..7eddef9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -156,6 +156,8 @@ public class ServiceScheduler extends CompositeService {
   // requests for a single service is not recommended.
   private boolean hasAtLeastOnePlacementConstraint;
 
+  private boolean gracefulStop = false;
+
   public ServiceScheduler(ServiceContext context) {
 super(context.service.getName());
 this.context = context;
@@ -199,6 +201,7 @@ public class ServiceScheduler extends CompositeService {
 addIfService(amRMClient);
 
 nmClient = createNMClient();
+nmClient.getClient().cleanupRunningContainersOnStop(false);
 addIfService(nmClient);
 
 dispatcher = new AsyncDispatcher("Component  dispatcher");
@@ -252,6 +255,11 @@ public class ServiceScheduler extends CompositeService {
 .createAMRMClientAsync(1000, new AMRMClientCallback());
   }
 
+  protected void setGracefulStop() {
+this.gracefulStop = true;
+nmClient.getClient().cleanupRunningContainersOnStop(true);
+  }
+
   @Override
   public void serviceInit(Configuration conf) throws Exception {
 try {
@@ -266,26 +274,31 @@ public class ServiceScheduler extends CompositeService {
   public void serviceStop() throws Exception {
 LOG.info("Stopping service scheduler");
 
-// Mark component-instances/containers as STOPPED
-if (YarnConfiguration.timelineServic
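
To summarize the control flow above with a compilable sketch: the scheduler now starts with container cleanup disabled on the NM client, so an unexpected SIGTERM leaves containers running for a later AM attempt, and only the client-initiated stop RPC flips the flag. NMClientStandIn below is a hypothetical stand-in for the object returned by NMClientAsync#getClient(); the real call, as shown in the diff, is cleanupRunningContainersOnStop(boolean).

public class GracefulStopSketch {
  interface NMClientStandIn {
    void cleanupRunningContainersOnStop(boolean cleanup);
  }

  private final NMClientStandIn nmClient;
  private boolean gracefulStop = false;

  GracefulStopSketch(NMClientStandIn nmClient) {
    this.nmClient = nmClient;
    // Default: a SIGTERM-driven shutdown must not kill running containers,
    // so a recovered AM attempt can pick them back up.
    nmClient.cleanupRunningContainersOnStop(false);
  }

  // Invoked only from the client-facing stop RPC, as in ClientAMService.
  void setGracefulStop() {
    gracefulStop = true;
    // A deliberate stop of the whole service: clean containers up.
    nmClient.cleanupRunningContainersOnStop(true);
  }

  public static void main(String[] args) {
    GracefulStopSketch s = new GracefulStopSketch(cleanup ->
        System.out.println("cleanupRunningContainersOnStop(" + cleanup + ")"));
    s.setGracefulStop(); // e.g. the user ran "yarn app -stop <service>"
    System.out.println("graceful=" + s.gracefulStop);
  }
}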

hadoop git commit: HDFS-13197. Ozone: Fix ConfServlet#getOzoneTags cmd. Contributed by Ajay Kumar.

2018-04-13 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 584c573a5 -> 66610b5fd


HDFS-13197. Ozone: Fix ConfServlet#getOzoneTags cmd. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66610b5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66610b5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66610b5f

Branch: refs/heads/HDFS-7240
Commit: 66610b5fd5dc29c1bff006874bf46d426d3a9dfa
Parents: 584c573
Author: Xiaoyu Yao 
Authored: Fri Apr 13 13:42:57 2018 -0700
Committer: Xiaoyu Yao 
Committed: Fri Apr 13 13:42:57 2018 -0700

--
 .../hadoop/hdds/conf/HddsConfServlet.java   | 181 +
 .../common/src/main/resources/ozone-default.xml |  10 +
 .../hadoop/hdds/server/BaseHttpServer.java  |  10 +-
 .../src/main/resources/webapps/static/ozone.js  | 668 ++-
 .../webapps/static/templates/config.html|  28 +-
 5 files changed, 562 insertions(+), 335 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66610b5f/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
new file mode 100644
index 000..068e41f
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import com.google.gson.Gson;
+import java.io.IOException;
+import java.io.Writer;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer2;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A servlet to print out the running configuration data.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public class HddsConfServlet extends HttpServlet {
+
+  private static final long serialVersionUID = 1L;
+
+  protected static final String FORMAT_JSON = "json";
+  protected static final String FORMAT_XML = "xml";
+  private static final String COMMAND = "cmd";
+  private static final OzoneConfiguration OZONE_CONFIG =
+  new OzoneConfiguration();
+  transient Logger LOG = LoggerFactory.getLogger(HddsConfServlet.class);
+
+
+  /**
+   * Return the Configuration of the daemon hosting this servlet.
+   * This is populated when the HttpServer starts.
+   */
+  private Configuration getConfFromContext() {
+Configuration conf = (Configuration) getServletContext().getAttribute(
+HttpServer2.CONF_CONTEXT_ATTRIBUTE);
+assert conf != null;
+return conf;
+  }
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+  throws ServletException, IOException {
+
+if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
+request, response)) {
+  return;
+}
+
+String format = parseAcceptHeader(request);
+if (FORMAT_XML.equals(format)) {
+  response.setContentType("text/xml; charset=utf-8");
+} else if (FORMAT_JSON.equals(format)) {
+  response.setContentType("application/json; charset=utf-8");
+}
+
+String name = request.getParameter("name");
+Writer out = response.getWriter();
+String cmd = request.getParameter(COMMAND);
+
+processCommand(cmd, format, request, response, out, name);
+out.close();
+  }
+
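
The doGet flow above reduces to a standard content-negotiation pattern, sketched below under simplified assumptions (parseAcceptHeaderSketch is a stand-in for the servlet's real Accept-header parsing, the echo body replaces the real processCommand dispatch, and the javax.servlet API is assumed on the classpath):

import java.io.IOException;
import java.io.Writer;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class ConfFormatSketch extends HttpServlet {
  static final String FORMAT_JSON = "json";
  static final String FORMAT_XML = "xml";

  // Simplified stand-in: prefer JSON when the client asks for it, else XML.
  static String parseAcceptHeaderSketch(HttpServletRequest request) {
    String accept = request.getHeader("Accept");
    return accept != null && accept.contains("json") ? FORMAT_JSON : FORMAT_XML;
  }

  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws IOException {
    String format = parseAcceptHeaderSketch(request);
    if (FORMAT_XML.equals(format)) {
      response.setContentType("text/xml; charset=utf-8");
    } else if (FORMAT_JSON.equals(format)) {
      response.setContentType("application/json; charset=utf-8");
    }
    String cmd = request.getParameter("cmd"); // the COMMAND parameter above
    try (Writer out = response.getWriter()) {
      out.write(cmd == null ? "full config dump as " + format
                            : "output of cmd=" + cmd);
    }
  }
}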

hadoop git commit: MAPREDUCE-7077. Pipe mapreduce job fails with Permission denied for jobTokenPassword. (Akira Ajisaka via wangda)

2018-04-13 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9031a76d4 -> 995cba65f


MAPREDUCE-7077. Pipe mapreduce job fails with Permission denied for 
jobTokenPassword. (Akira Ajisaka via wangda)

Change-Id: Ie8f01425d58409fa3661f768205b7616128c8aa4
(cherry picked from commit 035e0f97ea44b0495707949a781d8792dcf6ea6b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/995cba65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/995cba65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/995cba65

Branch: refs/heads/trunk
Commit: 995cba65fe29966583e36f9491d9a27b323918ae
Parents: 9031a76
Author: Wangda Tan 
Authored: Thu Apr 12 14:33:33 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 13 13:52:10 2018 -0700

--
 .../apache/hadoop/mapred/pipes/Application.java  |  5 ++---
 .../hadoop/mapred/pipes/TestPipeApplication.java | 19 ---
 2 files changed, 10 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/995cba65/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
index 5c8aab9..83d2509 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.TaskAttemptID;
 import org.apache.hadoop.mapred.TaskLog;
-import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
@@ -104,8 +103,8 @@ class Application

http://git-wip-us.apache.org/repos/asf/hadoop/blob/995cba65/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
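(The body of this hunk was lost in the mail archive; the remaining hunks of the test diff follow.)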
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
index 13597e0..88d8f95 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapred.Counters;
@@ -84,10 +83,10 @@ public class TestPipeApplication {
   public void testRunner() throws Exception {
 
 // clean old password files
-JobConf conf = new JobConf();
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 try {
   RecordReader rReader = new 
ReaderPipesMapRunner();
+  JobConf conf = new JobConf();
   conf.set(Submitter.IS_JAVA_RR, "true");
   // for stdour and stderror
 
@@ -163,7 +162,7 @@ public class TestPipeApplication {
 
 TestTaskReporter reporter = new TestTaskReporter();
 
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 try {
 
   conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
@@ -248,7 +247,7 @@ public class TestPipeApplication {
 
 JobConf conf = new JobConf();
 
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 
 System.setProperty("test.build.data",
 "target/tmp/build/TEST_SUBMITTER_MAPPER/data");
@@ -389,8 +388,8 @@ public class TestPipeApplication {
   @Test
   public void testPipesReduser() throws Exception {
 
+File[] psw = cleanTo

[27/44] hadoop git commit: YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb logger. Contributed by Jason Lowe.

2018-04-13 Thread xyao
YARN-8120. JVM can crash with SIGSEGV when exiting due to custom leveldb 
logger. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bb128df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bb128df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bb128df

Branch: refs/heads/HDFS-7240
Commit: 6bb128dfb893cf0e4aa2d3ecc65440668a1fc8d7
Parents: d272056
Author: Eric E Payne 
Authored: Thu Apr 12 16:04:23 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 16:04:23 2018 +

--
 .../v2/hs/HistoryServerLeveldbStateStoreService.java| 11 ---
 .../java/org/apache/hadoop/mapred/ShuffleHandler.java   | 12 
 .../recovery/NMLeveldbStateStoreService.java| 12 
 .../resourcemanager/recovery/LeveldbRMStateStore.java   | 12 
 4 files changed, 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bb128df/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
index c8741aa..6d2e407 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
@@ -74,7 +74,6 @@ public class HistoryServerLeveldbStateStoreService extends
 Path storeRoot = createStorageDir(getConfig());
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LeveldbLogger());
 LOG.info("Using state database at " + storeRoot + " for recovery");
 File dbfile = new File(storeRoot.toString());
 try {
@@ -366,14 +365,4 @@ public class HistoryServerLeveldbStateStoreService extends
 + getCurrentVersion() + ", but loading version " + loadedVersion);
 }
   }
-
-  private static class LeveldbLogger implements org.iq80.leveldb.Logger {
-private static final Logger LOG =
-LoggerFactory.getLogger(LeveldbLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bb128df/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index ec992fe..aeda9cc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -93,7 +93,6 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.fusesource.leveldbjni.internal.NativeDB;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBException;
-import org.iq80.leveldb.Logger;
 import org.iq80.leveldb.Options;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -628,7 +627,6 @@ public class ShuffleHandler extends AuxiliaryService {
   private void startStore(Path recoveryRoot) throws IOException {
 Options options = new Options();
 options.createIfMissing(false);
-options.logger(new LevelDBLogger());
 Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
 LOG.info("Using state database at " + dbPath + " for recovery");
 File dbfile = new File(dbPath.toString());
@@ -774,16 +772,6 @@ public class ShuffleHandler extends AuxiliaryService {
 }
   }
 
-  private static class LevelDBLogger implements Logger {
-private static final org.slf4j.Logger LOG =
-LoggerFactory.getLogger(LevelDBLogger.class);
-
-@Override
-public void log(String message) {
-  LOG.info(message);
-}
-  }
-
   static class TimeoutHandler extends IdleSt
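
The crash mode being fixed: leveldbjni's native threads can invoke a custom org.iq80.leveldb.Logger after the JVM has begun tearing down, and that late callback into half-destroyed Java logging can SIGSEGV the process on exit. The remedy in every file above is simply to stop installing the logger. A minimal sketch of the post-patch open path (the /tmp path is a placeholder; leveldbjni is assumed on the classpath):

import java.io.File;
import java.io.IOException;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public class LeveldbOpenSketch {
  public static void main(String[] args) throws IOException {
    Options options = new Options();
    options.createIfMissing(true);
    // Note: no options.logger(...) call; that is exactly what the patch
    // removes, leaving leveldb's default native logging in place.
    try (DB db = JniDBFactory.factory.open(new File("/tmp/state-db"), options)) {
      db.put("key".getBytes(), "value".getBytes());
    }
  }
}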

[07/44] hadoop git commit: YARN-8133. Doc link broken for yarn-service from overview page. (Rohith Sharma K S via wangda)

2018-04-13 Thread xyao
YARN-8133. Doc link broken for yarn-service from overview page. (Rohith Sharma 
K S via wangda)

Change-Id: Iacf9a004585dd59e1c0cd8f8c618a38f047cc0fe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d919eb6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d919eb6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d919eb6e

Branch: refs/heads/HDFS-7240
Commit: d919eb6efa1072517017c75fb323e391f4418dc8
Parents: 2bf9cc2
Author: Wangda Tan 
Authored: Tue Apr 10 17:33:15 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 10 17:33:15 2018 -0700

--
 .../src/site/markdown/yarn-service/Concepts.md|  6 +++---
 .../src/site/markdown/yarn-service/Overview.md| 14 +++---
 .../src/site/markdown/yarn-service/QuickStart.md  | 12 ++--
 .../src/site/markdown/yarn-service/RegistryDNS.md |  4 ++--
 .../site/markdown/yarn-service/ServiceDiscovery.md|  4 ++--
 5 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d919eb6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
index e567d03..5c77f17 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md
@@ -29,7 +29,7 @@ affinity and anti-affinity scheduling, log aggregation for 
services, automatical
 A restful API server is developed to allow users to deploy/manage their 
services on YARN via a simple JSON spec. This avoids users
 from dealing with the low-level APIs, writing complex code to bring their 
services onto YARN. The REST layer acts as a unified REST based entry for
 creation and lifecycle management of YARN services. Services here can range 
from simple single-component apps to the most complex, 
-multi-component applications needing special orchestration needs. Please refer 
to this [API doc](YarnServiceAPI.md) for detailed API documentations.
+multi-component applications needing special orchestration needs. Please refer 
to this [API doc](YarnServiceAPI.html) for detailed API documentations.
 
 The API-server is stateless, which means users can simply spin up multiple 
instances, and have a load balancer fronting them to 
 support HA, distribute the load etc.
@@ -37,10 +37,10 @@ support HA, distribute the load etc.
 ### Service Discovery
 A DNS server is implemented to enable discovering services on YARN via the 
standard mechanism: DNS lookup.
 
-The framework posts container information such as hostname and ip into the 
[YARN service registry](../registry/index.md). And the DNS server essentially 
exposes the
+The framework posts container information such as hostname and ip into the 
[YARN service registry](../registry/index.html). And the DNS server essentially 
exposes the
 information in YARN service registry by translating them into DNS records such 
as A record and SRV record.
 Clients can then discover the IPs of containers via standard DNS lookup.
 
 The previous read mechanisms of YARN Service Registry were limited to a 
registry specific (java) API and a REST interface and are difficult
-to wireup existing clients and services. The DNS based service discovery 
eliminates this gap. Please refer to this [Service Discovery 
doc](ServiceDiscovery.md)
+to wireup existing clients and services. The DNS based service discovery 
eliminates this gap. Please refer to this [Service Discovery 
doc](ServiceDiscovery.html)
 for more details.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d919eb6e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 72c2d3e..8e2bf9a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -46,16 +46,16 @@ The benefits of combining these workloads are two-fold:
 
 *`This feature is in alpha state`* and so APIs, command lines are subject to 
change. We will continue to update the documents over time.
 
-[QuickStart](QuickStart.md) shows a quick tutorial that walks you throu

[15/44] hadoop git commit: HDFS-13403: libhdfs++ Use hdfs::IoService object rather than asio::io_service. Contributed by James Clampffer.

2018-04-13 Thread xyao
HDFS-13403: libhdfs++ Use hdfs::IoService object rather than asio::io_service.  
Contributed by James Clampffer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eefe2a14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eefe2a14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eefe2a14

Branch: refs/heads/HDFS-7240
Commit: eefe2a147c83dbb62c4021b67d59d3b9f065f890
Parents: 7eb783e
Author: James Clampffer 
Authored: Wed Apr 11 10:27:23 2018 -0400
Committer: James Clampffer 
Committed: Wed Apr 11 10:27:23 2018 -0400

--
 .../native/libhdfspp/include/hdfspp/hdfspp.h|  53 +--
 .../native/libhdfspp/include/hdfspp/ioservice.h | 140 
 .../native/libhdfspp/lib/bindings/c/hdfs.cc |   7 +-
 .../native/libhdfspp/lib/common/CMakeLists.txt  |   2 +-
 .../native/libhdfspp/lib/common/async_stream.h  |  13 +-
 .../libhdfspp/lib/common/continuation/asio.h|   5 -
 .../libhdfspp/lib/common/hdfs_ioservice.cc  | 146 -
 .../libhdfspp/lib/common/hdfs_ioservice.h   |  79 -
 .../libhdfspp/lib/common/ioservice_impl.cc  | 159 +++
 .../libhdfspp/lib/common/ioservice_impl.h   |  76 +
 .../main/native/libhdfspp/lib/common/logging.h  |   3 -
 .../libhdfspp/lib/common/namenode_info.cc   |  15 +-
 .../native/libhdfspp/lib/common/namenode_info.h |   8 +-
 .../main/native/libhdfspp/lib/common/util.cc|  14 +-
 .../src/main/native/libhdfspp/lib/common/util.h |  25 ++-
 .../lib/connection/datanodeconnection.cc|  27 +++-
 .../lib/connection/datanodeconnection.h |  26 +--
 .../main/native/libhdfspp/lib/fs/filehandle.cc  |  18 +--
 .../main/native/libhdfspp/lib/fs/filehandle.h   |  12 +-
 .../main/native/libhdfspp/lib/fs/filesystem.cc  |  67 ++--
 .../main/native/libhdfspp/lib/fs/filesystem.h   |  66 ++--
 .../libhdfspp/lib/fs/namenode_operations.h  |   4 +-
 .../native/libhdfspp/lib/reader/block_reader.cc |  18 +--
 .../native/libhdfspp/lib/reader/block_reader.h  |  10 +-
 .../native/libhdfspp/lib/reader/datatransfer.h  |   4 +-
 .../libhdfspp/lib/rpc/namenode_tracker.cc   |   2 +-
 .../native/libhdfspp/lib/rpc/namenode_tracker.h |   4 +-
 .../main/native/libhdfspp/lib/rpc/request.cc|   5 +-
 .../native/libhdfspp/lib/rpc/rpc_connection.h   |   2 +-
 .../libhdfspp/lib/rpc/rpc_connection_impl.cc|  32 ++--
 .../libhdfspp/lib/rpc/rpc_connection_impl.h |   9 +-
 .../main/native/libhdfspp/lib/rpc/rpc_engine.cc |  14 +-
 .../main/native/libhdfspp/lib/rpc/rpc_engine.h  |   9 +-
 .../native/libhdfspp/tests/bad_datanode_test.cc |  31 ++--
 .../libhdfspp/tests/hdfs_ioservice_test.cc  |  10 +-
 .../native/libhdfspp/tests/mock_connection.h|   4 +-
 .../libhdfspp/tests/remote_block_reader_test.cc |   4 +-
 .../native/libhdfspp/tests/rpc_engine_test.cc   | 112 ++---
 38 files changed, 681 insertions(+), 554 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eefe2a14/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfspp.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfspp.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfspp.h
index 2fdeec9..e68a612 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfspp.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfspp.h
@@ -19,6 +19,7 @@
 #define LIBHDFSPP_HDFSPP_H_
 
 #include "hdfspp/options.h"
+#include "hdfspp/ioservice.h"
 #include "hdfspp/status.h"
 #include "hdfspp/events.h"
 #include "hdfspp/block_location.h"
@@ -31,62 +32,10 @@
 
 #include 
 #include 
-#include 
-#include 
 
 namespace hdfs {
 
 /**
- * An IoService manages a queue of asynchronous tasks. All libhdfs++
- * operations are filed against a particular IoService.
- *
- * When an operation is queued into an IoService, the IoService will
- * run the callback handler associated with the operation. Note that
- * the IoService must be stopped before destructing the objects that
- * post the operations.
- *
- * From an implementation point of view the hdfs::IoService provides
- * a thin wrapper over an asio::io_service object so that additional
- * instrumentation and functionality can be added.
- **/
-
-class IoService : public std::enable_shared_from_this
-{
- public:
-  static IoService *New();
-  static std::shared_ptr MakeShared();
-  virtual ~IoService();
-
-  /**
-   * Start up as many threads as there are logical processors.
-   * Return number of threads created.
-   **/
-  virtual unsigned int InitDefaultWorkers() = 0;
-
-  /**
-   * Initia

[14/44] hadoop git commit: HDFS-13403: libhdfs++ Use hdfs::IoService object rather than asio::io_service. Contributed by James Clampffer.

2018-04-13 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eefe2a14/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h
index 8e579a2..1dd43af 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_connection_impl.h
@@ -26,6 +26,7 @@
 #include "common/logging.h"
 #include "common/util.h"
 #include "common/libhdfs_events_impl.h"
+#include "hdfspp/ioservice.h"
 
 #include 
 #include 
@@ -76,8 +77,8 @@ template 
 RpcConnectionImpl::RpcConnectionImpl(std::shared_ptr engine)
 : RpcConnection(engine),
   options_(engine->options()),
-  socket_(engine->io_service()),
-  connect_timer_(engine->io_service())
+  socket_(engine->io_service()->GetRaw()),
+  connect_timer_(engine->io_service()->GetRaw())
 {
   LOG_TRACE(kRPC, << "RpcConnectionImpl::RpcConnectionImpl called &" << 
(void*)this);
 }
@@ -353,7 +354,7 @@ void RpcConnectionImpl::FlushPendingRequests() {
 OnSendCompleted(ec, size);
   });
   } else {  // Nothing to send for this request, inform the handler immediately
-::asio::io_service *service = GetIoService();
+std::shared_ptr service = GetIoService();
 if(!service) {
   LOG_ERROR(kRPC, << "RpcConnectionImpl@" << this << " attempted to access 
null IoService");
   // No easy way to bail out of this context, but the only way to get here 
is when
@@ -361,7 +362,7 @@ void RpcConnectionImpl::FlushPendingRequests() {
   return;
 }
 
-service->post(
+service->PostTask(
 // Never hold locks when calling a callback
 [req]() { req->OnResponseArrived(nullptr, Status::OK()); }
 );

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eefe2a14/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
index 0ca7c6a..ad6c9b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/rpc/rpc_engine.cc
@@ -30,7 +30,7 @@ template 
 using optional = std::experimental::optional;
 
 
-RpcEngine::RpcEngine(::asio::io_service *io_service, const Options &options,
+RpcEngine::RpcEngine(std::shared_ptr io_service, const Options 
&options,
  const std::string &client_name, const std::string 
&user_name,
  const char *protocol_name, int protocol_version)
 : io_service_(io_service),
@@ -40,7 +40,7 @@ RpcEngine::RpcEngine(::asio::io_service *io_service, const 
Options &options,
   protocol_name_(protocol_name),
   protocol_version_(protocol_version),
   call_id_(0),
-  retry_timer(*io_service),
+  retry_timer(io_service->GetRaw()),
   event_handlers_(std::make_shared()),
   connect_canceled_(false)
 {
@@ -86,7 +86,7 @@ bool RpcEngine::CancelPendingConnect() {
 
 void RpcEngine::Shutdown() {
   LOG_DEBUG(kRPC, << "RpcEngine::Shutdown called");
-  io_service_->post([this]() {
+  io_service_->PostLambda([this]() {
 std::lock_guard state_lock(engine_state_lock_);
 conn_.reset();
   });
@@ -154,7 +154,7 @@ void RpcEngine::AsyncRpc(
 
   // In case user-side code isn't checking the status of Connect before doing 
RPC
   if(connect_canceled_) {
-io_service_->post(
+io_service_->PostLambda(
 [handler](){ handler(Status::Canceled()); }
 );
 return;
@@ -190,7 +190,7 @@ void RpcEngine::AsyncRpcCommsError(
 std::vector> pendingRequests) {
   LOG_ERROR(kRPC, << "RpcEngine::AsyncRpcCommsError called; status=\"" << 
status.ToString() << "\" conn=" << failedConnection.get() << " reqs=" << 
std::to_string(pendingRequests.size()));
 
-  io_service().post([this, status, failedConnection, pendingRequests]() {
+  io_service_->PostLambda([this, status, failedConnection, pendingRequests]() {
 RpcCommsError(status, failedConnection, pendingRequests);
   });
 }
@@ -238,7 +238,7 @@ void RpcEngine::RpcCommsError(
   //on.  There might be a good argument for caching the first error
   //rather than the last one, that gets messy
 
-  io_service().post([req, status]() {
+  io_service()->PostLambda([req, status]() {
 req->OnResponseArrived(nullptr, status);  // Never call back while 
h

[02/44] hadoop git commit: HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable substitution. Contributed by Jim Brennan

2018-04-13 Thread xyao
HADOOP-15357. Configuration.getPropsWithPrefix no longer does variable 
substitution. Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8139754
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8139754
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8139754

Branch: refs/heads/HDFS-7240
Commit: e81397545a273cf9a090010eb644b836e0ef8c7b
Parents: d553799
Author: Jason Lowe 
Authored: Tue Apr 10 16:44:03 2018 -0500
Committer: Jason Lowe 
Committed: Tue Apr 10 16:44:55 2018 -0500

--
 .../org/apache/hadoop/conf/Configuration.java   | 11 +++--
 .../apache/hadoop/conf/TestConfiguration.java   | 26 +++-
 2 files changed, 24 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8139754/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 78a2e9f..f1e2a9d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2869,15 +2869,12 @@ public class Configuration implements 
Iterable>,
*/
   public Map getPropsWithPrefix(String confPrefix) {
 Properties props = getProps();
-Enumeration e = props.propertyNames();
 Map configMap = new HashMap<>();
-String name = null;
-while (e.hasMoreElements()) {
-  name = (String) e.nextElement();
+for (String name : props.stringPropertyNames()) {
   if (name.startsWith(confPrefix)) {
-String value = props.getProperty(name);
-name = name.substring(confPrefix.length());
-configMap.put(name, value);
+String value = get(name);
+String keyName = name.substring(confPrefix.length());
+configMap.put(keyName, value);
   }
 }
 return configMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8139754/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index b0bb0d7..33a9880 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -2320,19 +2320,33 @@ public class TestConfiguration {
 FileUtil.fullyDelete(tmpDir);
   }
 
+  @Test
   public void testGettingPropertiesWithPrefix() throws Exception {
 Configuration conf = new Configuration();
 for (int i = 0; i < 10; i++) {
-  conf.set("prefix" + ".name" + i, "value");
+  conf.set("prefix." + "name" + i, "value" + i);
 }
 conf.set("different.prefix" + ".name", "value");
-Map props = conf.getPropsWithPrefix("prefix");
-assertEquals(props.size(), 10);
+Map prefixedProps = conf.getPropsWithPrefix("prefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value" + i, prefixedProps.get("name" + i));
+}
 
+// Repeat test with variable substitution
+conf.set("foo", "bar");
+for (int i = 0; i < 10; i++) {
+  conf.set("subprefix." + "subname" + i, "value_${foo}" + i);
+}
+prefixedProps = conf.getPropsWithPrefix("subprefix.");
+assertEquals(prefixedProps.size(), 10);
+for (int i = 0; i < 10; i++) {
+  assertEquals("value_bar" + i, prefixedProps.get("subname" + i));
+}
 // test call with no properties for a given prefix
-props = conf.getPropsWithPrefix("none");
-assertNotNull(props.isEmpty());
-assertTrue(props.isEmpty());
+prefixedProps = conf.getPropsWithPrefix("none");
+assertNotNull(prefixedProps.isEmpty());
+assertTrue(prefixedProps.isEmpty());
   }
 
   public static void main(String[] argv) throws Exception {
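
A small usage sketch of the restored behavior (requires hadoop-common on the classpath): values returned by getPropsWithPrefix now pass through get(name), so ${var} references are expanded, matching the test added above.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class PrefixPropsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // skip default resources
    conf.set("foo", "bar");
    conf.set("subprefix.subname0", "value_${foo}0");

    Map<String, String> props = conf.getPropsWithPrefix("subprefix.");
    // Before the fix this returned the raw "value_${foo}0"; after it,
    // the variable is substituted and the prefix stripped from the key.
    System.out.println(props.get("subname0")); // -> value_bar0
  }
}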





[17/44] hadoop git commit: HDFS-13428. RBF: Remove LinkedList From StateStoreFileImpl.java. Contributed by BELUGA BEHR.

2018-04-13 Thread xyao
HDFS-13428. RBF: Remove LinkedList From StateStoreFileImpl.java. Contributed by 
BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7d5bace
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7d5bace
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7d5bace

Branch: refs/heads/HDFS-7240
Commit: f7d5bace435a8de151b94ccc3599a6c4de8f7daf
Parents: 0c93d43
Author: Inigo Goiri 
Authored: Wed Apr 11 10:40:30 2018 -0700
Committer: Inigo Goiri 
Committed: Wed Apr 11 10:43:43 2018 -0700

--
 .../federation/store/driver/impl/StateStoreFileImpl.java | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7d5bace/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
index 7d9ddc6..6b288b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileImpl.java
@@ -26,9 +26,11 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
 import java.nio.charset.StandardCharsets;
-import java.util.LinkedList;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
@@ -134,15 +136,16 @@ public class StateStoreFileImpl extends 
StateStoreFileBaseImpl {
 
   @Override
   protected List getChildren(String path) {
-List ret = new LinkedList<>();
 File dir = new File(path);
 File[] files = dir.listFiles();
-if (files != null) {
+if (ArrayUtils.isNotEmpty(files)) {
+  List ret = new ArrayList<>(files.length);
   for (File file : files) {
 String filename = file.getName();
 ret.add(filename);
   }
+  return ret;
 }
-return ret;
+return Collections.emptyList();
   }
 }
\ No newline at end of file
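
Condensed, the collection change above is this pattern (a standalone sketch, not the RBF class itself): allocate an exactly-sized ArrayList only when there are entries, and return the shared immutable Collections.emptyList() otherwise, instead of building a LinkedList on every call.

import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ChildrenListSketch {
  static List<String> getChildren(String path) {
    File[] files = new File(path).listFiles();
    if (files != null && files.length > 0) { // what ArrayUtils.isNotEmpty checks
      List<String> ret = new ArrayList<>(files.length); // pre-sized, no resizes
      for (File file : files) {
        ret.add(file.getName());
      }
      return ret;
    }
    return Collections.emptyList(); // zero allocation for the empty case
  }

  public static void main(String[] args) {
    System.out.println(getChildren(System.getProperty("java.io.tmpdir")));
  }
}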





[16/44] hadoop git commit: HDFS-13045. RBF: Improve error message returned from subcluster. Contributed by Inigo Goiri.

2018-04-13 Thread xyao
HDFS-13045. RBF: Improve error message returned from subcluster. Contributed by 
Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c93d43f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c93d43f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c93d43f

Branch: refs/heads/HDFS-7240
Commit: 0c93d43f3d624a4fd17b3b050443d9e7e20d4f0a
Parents: eefe2a1
Author: Wei Yan 
Authored: Wed Apr 11 08:37:43 2018 -0700
Committer: Wei Yan 
Committed: Wed Apr 11 08:37:43 2018 -0700

--
 .../resolver/FederationNamespaceInfo.java   |  5 ++
 .../federation/resolver/MountTableResolver.java |  4 +-
 .../federation/resolver/RemoteLocation.java | 35 ++
 .../router/RemoteLocationContext.java   |  7 ++
 .../federation/router/RouterRpcClient.java  | 67 +++-
 .../federation/store/records/MountTable.java|  2 +-
 .../store/records/impl/pb/MountTablePBImpl.java |  2 +-
 .../hdfs/server/federation/MockResolver.java| 12 +++-
 .../federation/router/TestRouterAdmin.java  |  4 +-
 .../server/federation/router/TestRouterRpc.java | 48 ++
 .../store/records/TestMountTable.java   |  4 +-
 11 files changed, 165 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c93d43f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
index edcd308..33edd30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FederationNamespaceInfo.java
@@ -48,6 +48,11 @@ public class FederationNamespaceInfo extends 
RemoteLocationContext {
 return this.nameserviceId;
   }
 
+  @Override
+  public String getSrc() {
+return null;
+  }
+
   /**
* The HDFS cluster id for this namespace.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c93d43f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 9713138..3f6efd6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -388,7 +388,7 @@ public class MountTableResolver
 } else {
   // Not found, use default location
   RemoteLocation remoteLocation =
-  new RemoteLocation(defaultNameService, path);
+  new RemoteLocation(defaultNameService, path, path);
   List locations =
   Collections.singletonList(remoteLocation);
   ret = new PathLocation(null, locations);
@@ -519,7 +519,7 @@ public class MountTableResolver
 newPath += Path.SEPARATOR;
   }
   newPath += remainingPath;
-  RemoteLocation remoteLocation = new RemoteLocation(nsId, newPath);
+  RemoteLocation remoteLocation = new RemoteLocation(nsId, newPath, path);
   locations.add(remoteLocation);
 }
 DestinationOrder order = entry.getDestOrder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c93d43f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java
index 6aa12ce..77d0500 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/RemoteLocation.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import o

[23/44] hadoop git commit: HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho and Ted Yu.

2018-04-13 Thread xyao
HDFS-7101. Potential null dereference in DFSck#doWork(). Contributed by skrho 
and Ted Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/113af12c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/113af12c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/113af12c

Branch: refs/heads/HDFS-7240
Commit: 113af12cfb240ea9a7189bb2701693466eb8e993
Parents: 832852c
Author: Akira Ajisaka 
Authored: Thu Apr 12 17:47:37 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 17:47:37 2018 +0900

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/113af12c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 96fca24..10b0012 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -354,7 +354,7 @@ public class DFSck extends Configured implements Tool {
 BufferedReader input = new BufferedReader(new InputStreamReader(
   stream, "UTF-8"));
 String line = null;
-String lastLine = null;
+String lastLine = NamenodeFsck.CORRUPT_STATUS;
 int errCode = -1;
 try {
   while ((line = input.readLine()) != null) {
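
The defect and the fix in isolation (a self-contained sketch; DEFAULT_STATUS and the string literals are placeholders for the NamenodeFsck constants): DFSck remembers the last line of fsck output and later inspects it, so when the stream produces no lines at all, a null seed value leads to a NullPointerException. Seeding it with the corrupt status keeps the empty-output path safe and conservative.

import java.io.BufferedReader;
import java.io.StringReader;

public class LastLineSketch {
  static final String DEFAULT_STATUS = "is CORRUPT"; // placeholder default

  static int exitCode(BufferedReader input) throws Exception {
    String line;
    String lastLine = DEFAULT_STATUS; // was: null -> NPE on empty output
    while ((line = input.readLine()) != null) {
      lastLine = line;
    }
    // Safe even when fsck produced no output; empty output reads as corrupt.
    return lastLine.endsWith("is HEALTHY") ? 0 : 1;
  }

  public static void main(String[] args) throws Exception {
    // Empty stream: exits 1 instead of throwing NullPointerException.
    System.out.println(exitCode(new BufferedReader(new StringReader(""))));
  }
}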





[06/44] hadoop git commit: YARN-8116. Nodemanager fails with NumberFormatException: For input string: . (Chandni Singh via wangda)

2018-04-13 Thread xyao
YARN-8116. Nodemanager fails with NumberFormatException: For input string: . 
(Chandni Singh via wangda)

Change-Id: Idd30cfca59982d3fc6e47aa1b88f844a78fae94d


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bf9cc2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bf9cc2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bf9cc2c

Branch: refs/heads/HDFS-7240
Commit: 2bf9cc2c73944c9f7cde56714b8cf6995cfa539b
Parents: c467f31
Author: Wangda Tan 
Authored: Tue Apr 10 17:32:38 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 10 17:32:38 2018 -0700

--
 .../containermanager/container/ContainerImpl.java   |  3 ++-
 .../recovery/NMLeveldbStateStoreService.java|  4 +++-
 .../recovery/TestNMLeveldbStateStoreService.java| 16 
 3 files changed, 21 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf9cc2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 2115100..c09c7f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -2191,7 +2191,8 @@ public class ContainerImpl implements Container {
   }
 
   private void storeRetryContext() {
-if (windowRetryContext.getRestartTimes() != null) {
+if (windowRetryContext.getRestartTimes() != null &&
+!windowRetryContext.getRestartTimes().isEmpty()) {
   try {
 stateStore.storeContainerRestartTimes(containerId,
 windowRetryContext.getRestartTimes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf9cc2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index bf4c0ad..723dd48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -347,7 +347,9 @@ public class NMLeveldbStateStoreService extends 
NMStateStoreService {
 value.substring(1, value.length() - 1).split(", ");
 List restartTimes = new ArrayList<>();
 for (String restartTime : unparsedRestartTimes) {
-  restartTimes.add(Long.parseLong(restartTime));
+  if (!restartTime.isEmpty()) {
+restartTimes.add(Long.parseLong(restartTime));
+  }
 }
 rcs.setRestartTimes(restartTimes);
   } else if (suffix.equals(CONTAINER_WORK_DIR_KEY_SUFFIX)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf9cc2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index c270199..265b3e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-serv
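
The failure this patch guards against is easy to reproduce in isolation. Restart times are persisted as the toString() of a List (e.g. "[1, 2]"); an empty list serializes to "[]", whose bracket-stripped, split form is a single empty string, and Long.parseLong("") throws NumberFormatException. The added isEmpty() check skips that token. A standalone sketch mirroring the state-store parsing above:

import java.util.ArrayList;
import java.util.List;

public class RestartTimesParseSketch {
  static List<Long> parse(String value) {
    // Same shape as the state store: strip "[" and "]", split on ", ".
    String[] unparsed = value.substring(1, value.length() - 1).split(", ");
    List<Long> restartTimes = new ArrayList<>();
    for (String t : unparsed) {
      if (!t.isEmpty()) { // the added guard: "" comes from an empty list
        restartTimes.add(Long.parseLong(t));
      }
    }
    return restartTimes;
  }

  public static void main(String[] args) {
    System.out.println(parse("[1, 2, 3]")); // [1, 2, 3]
    System.out.println(parse("[]"));        // [] rather than NumberFormatException
  }
}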

[03/44] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-13 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/583fa6ed/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 1517b04..c171143 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1,3 +1,4 @@
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -31,26 +32,35 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
+import org.apache.hadoop.crypto.key.kms.KMSTokenRenewer;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
+import org.apache.hadoop.crypto.key.kms.TestLoadBalancingKMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import 
org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.KMSUtil;
+import org.apache.hadoop.util.KMSUtilFaultInjector;
 import org.apache.hadoop.util.Time;
 import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -71,7 +81,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.Writer;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
@@ -96,6 +105,10 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
+import static org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_KIND;
+import static 
org.apache.hadoop.crypto.key.kms.KMSDelegationToken.TOKEN_LEGACY_KIND;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -113,6 +126,20 @@ public class TestKMS {
 
   private SSLFactory sslFactory;
 
+  private final KMSUtilFaultInjector oldInjector =
+  KMSUtilFaultInjector.get();
+
+  // Injector to create providers with different ports. Can only happen in 
tests
+  private final KMSUtilFaultInjector testInjector =
+  new KMSUtilFaultInjector() {
+@Override
+public KeyProvider createKeyProviderForTests(String value,
+Configuration conf) throws IOException {
+  return TestLoadBalancingKMSClientProvider
+  .createKeyProviderForTests(value, conf);
+}
+  };
+
   // Keep track of all key providers created during a test case, so they can be
   // closed at test tearDown.
  private List<KeyProvider> providersCreated = new LinkedList<>();
@@ -122,7 +149,12 @@ public class TestKMS {
 
   @Before
   public void setUp() throws Exception {
-setUpMiniKdc();
+GenericTestUtils.setLogLevel(KMSClientProvider.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticationHandler.LOG, Level.TRACE);
+GenericTestUtils
+.setLogLevel(DelegationTokenAuthenticator.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(KMSUtil.LOG, Level.TRACE);
 // resetting kerberos security
 Configuration conf = new Configuration();
 Us

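The fault-injector field above lets the test substitute how key providers are constructed. A minimal sketch of how such an injector is typically swapped in and restored around a test case, assuming KMSUtilFaultInjector exposes a static set(...) counterpart to get() (the setter does not appear in this excerpt):

    // Sketch only: install the test injector, restore the original afterwards.
    // KMSUtilFaultInjector.set(...) is assumed; only get() is visible above.
    @Before
    public void installInjector() {
      KMSUtilFaultInjector.set(testInjector);  // provider creation now goes through the test hook
    }

    @After
    public void restoreInjector() {
      KMSUtilFaultInjector.set(oldInjector);   // leave no override behind for other tests
    }
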
[10/44] hadoop git commit: HDFS-13056. Expose file-level composite CRCs in HDFS which are comparable across different instances/layouts. Contributed by Dennis Huo.

2018-04-13 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c9cdad6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
index 2356201..384da54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/datatransfer.proto
@@ -148,8 +148,9 @@ message OpCopyBlockProto {
   required BaseHeaderProto header = 1;
 }
 
-message OpBlockChecksumProto { 
+message OpBlockChecksumProto {
   required BaseHeaderProto header = 1;
+  optional BlockChecksumOptionsProto blockChecksumOptions = 2;
 }
 
 message OpBlockGroupChecksumProto {
@@ -160,6 +161,7 @@ message OpBlockGroupChecksumProto {
   required ErasureCodingPolicyProto ecPolicy = 4;
   repeated uint32 blockIndices = 5;
   required uint64 requestedNumBytes = 6;
+  optional BlockChecksumOptionsProto blockChecksumOptions = 7;
 }
 
 /**
@@ -313,8 +315,9 @@ message DNTransferAckProto {
 message OpBlockChecksumResponseProto {
   required uint32 bytesPerCrc = 1;
   required uint64 crcPerBlock = 2;
-  required bytes md5 = 3;
+  required bytes blockChecksum = 3;
   optional ChecksumTypeProto crcType = 4;
+  optional BlockChecksumOptionsProto blockChecksumOptions = 5;
 }
 
 message OpCustomProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c9cdad6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 29d0b4e..441b9d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -480,6 +480,27 @@ enum ChecksumTypeProto {
   CHECKSUM_CRC32C = 2;
 }
 
+enum BlockChecksumTypeProto {
+  MD5CRC = 1;  // BlockChecksum obtained by taking the MD5 digest of chunk CRCs
+  COMPOSITE_CRC = 2;  // Chunk-independent CRC, optionally striped
+}
+
+/**
+ * Algorithms/types denoting how block-level checksums are computed using
+ * lower-level chunk checksums/CRCs.
+ * These options should be kept in sync with
+ * org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
+ */
+message BlockChecksumOptionsProto {
+  optional BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];
+
+  // Only used if blockChecksumType specifies a striped format, such as
+  // COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
+  // to be the concatenation of N crcs, where
+  // N == ((requestedLength - 1) / stripeLength) + 1
+  optional uint64 stripeLength = 2;
+}
+
 /**
  * HDFS Server Defaults
  */

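The stripeLength contract above determines how many CRCs a COMPOSITE_CRC response carries. A minimal sketch of that arithmetic in Java, using only the two quantities named in the proto comment:

    // Number of per-stripe CRCs expected back for a COMPOSITE_CRC request.
    // Mirrors the proto comment: N == ((requestedLength - 1) / stripeLength) + 1
    static long expectedCrcCount(long requestedLength, long stripeLength) {
      if (requestedLength <= 0 || stripeLength <= 0) {
        throw new IllegalArgumentException("lengths must be positive");
      }
      return ((requestedLength - 1) / stripeLength) + 1;
    }
    // Example: a 10 MB range over a 4 MB stripe yields 3 CRCs (4 + 4 + 2 MB).
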
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c9cdad6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
index bab2e8d..5d2d1f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
@@ -301,8 +301,9 @@ public abstract class Receiver implements DataTransferProtocol {
 TraceScope traceScope = continueTraceSpan(proto.getHeader(),
 proto.getClass().getSimpleName());
 try {
-blockChecksum(PBHelperClient.convert(proto.getHeader().getBlock()),
-PBHelperClient.convert(proto.getHeader().getToken()));
+  blockChecksum(PBHelperClient.convert(proto.getHeader().getBlock()),
+  PBHelperClient.convert(proto.getHeader().getToken()),
+  PBHelperClient.convert(proto.getBlockChecksumOptions()));
 } finally {
   if (traceScope != null) traceScope.close();
 }
@@ -325,7 +326,8 @@ public abstract class Receiver implements DataTransferProtocol {
 try {
   blockGroupChecksum(stripedBlockInfo,
   PBHelperClient.convert(proto.getHeader().getToken()),
-  proto.getRequestedNumBytes());
+  proto.getRequestedNumBytes(),
+  PBHelperClient.convert(proto.getBlockChecksumOptions()));
 } finally {
   if (traceScope != null) {
 traceScope.close();

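Because blockChecksumOptions is an optional proto2 field, a request from an older client simply omits it and the declared default (MD5CRC) applies, which preserves the pre-patch behavior. A short sketch of that compatibility property on the server side:

    // proto2 semantics: getBlockChecksumOptions() on an absent optional field
    // returns the default instance, whose blockChecksumType is MD5CRC.
    // hasBlockChecksumOptions() distinguishes "explicitly sent" from "absent".
    BlockChecksumOptionsProto opts = proto.getBlockChecksumOptions();
    boolean legacyClient = !proto.hasBlockChecksumOptions();  // old client, MD5CRC path
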
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c9cdad6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdf

[18/44] hadoop git commit: YARN-7221. Add security check for privileged docker container. Contributed by Eric Yang

2018-04-13 Thread xyao
YARN-7221. Add security check for privileged docker container. Contributed by 
Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/933477e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/933477e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/933477e9

Branch: refs/heads/HDFS-7240
Commit: 933477e9e0526e2ed81ea454f8806de31981822a
Parents: f7d5bac
Author: Billie Rinaldi 
Authored: Wed Apr 11 08:23:20 2018 -0700
Committer: Billie Rinaldi 
Committed: Wed Apr 11 11:24:23 2018 -0700

--
 .../runtime/DockerLinuxContainerRuntime.java|  10 +-
 .../container-executor/impl/utils/docker-util.c | 100 ++-
 .../test/utils/test_docker_util.cc  |  97 +-
 .../runtime/TestDockerContainerRuntime.java |  11 +-
 4 files changed, 157 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/933477e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 51abeb6..7106aad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -767,7 +767,11 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   throw new ContainerExecutionException(message);
 }
   }
-  dockerRunAsUser = uid + ":" + gid;
+  if (!allowPrivilegedContainerExecution(container)) {
+dockerRunAsUser = uid + ":" + gid;
+  } else {
+dockerRunAsUser = ctx.getExecutionAttribute(USER);
+  }
 }
 
 //List -> stored as List -> fetched/converted to List
@@ -879,7 +883,9 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 }
 
 if(enableUserReMapping) {
-  runCommand.groupAdd(groups);
+  if (!allowPrivilegedContainerExecution(container)) {
+runCommand.groupAdd(groups);
+  }
 }
 
 // use plugins to update docker run command.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/933477e9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index 3bd94a1..fdeaeea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -16,6 +16,9 @@
  * limitations under the License.
  */
 
+#include 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -25,6 +28,9 @@
 #include "docker-util.h"
 #include "string-utils.h"
 #include "util.h"
+#include 
+#include 
+#include 
 
static int read_and_verify_command_file(const char *command_file, const char *docker_command,
 struct configuration *command_config) {
@@ -1254,14 +1260,94 @@ static int  add_rw_mounts(const struct configuration 
*command_config, const stru
   return add_mounts(command_config, conf, "rw-mounts", 0, out, outlen);
 }
 
+static int check_privileges(const char *user) {
+  int ngroups = 0;
+  gid_t *groups = NULL;
+  struct passwd *pw;
+  struct group *gr;
+  int ret = 0;
+  int waitid = -1;
+  int statval = 0;
+
+  pw = getpwnam(user);
+  if (pw == NULL) {
+fprintf(ERRORFILE, "User %s does not exist in host OS.\n", user);
+exit(INITIALIZE_USER_FAILED);
+  }
+
+  int rc = getgrouplist(user, pw->pw_gid, groups, &ngroups);
+  i

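The native check above (getpwnam plus getgrouplist) resolves the requesting user's groups before a privileged launch is permitted. A rough Java analog of the same deny-by-default gate; the ACL source here is purely hypothetical, since the real check lives in container-executor's check_privileges:

    // Hypothetical sketch: permit a privileged container only for users on an
    // operator-maintained allow list. Privileged containers bypass user
    // remapping, so the default must be to refuse.
    static boolean mayRunPrivileged(String user, java.util.Set<String> privilegedAcl) {
      return user != null && privilegedAcl.contains(user);
    }
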
[29/44] hadoop git commit: Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

2018-04-13 Thread xyao
Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs 
all the time. Contributed by Jinglun."

This reverts commit ac32b3576da4cc463dff85118163ccfff02215fc.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5353c75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5353c75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5353c75

Branch: refs/heads/HDFS-7240
Commit: b5353c75d90b9299f04dba255b9e9af5a8cc19eb
Parents: 4571351
Author: Inigo Goiri 
Authored: Thu Apr 12 09:28:23 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 09:28:23 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 --
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5353c75/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 1c38791..7b9cd64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -79,9 +79,6 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
-  if (currentUsedProxy != null) {
-return method.invoke(currentUsedProxy.proxy, args);
-  }
   Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5353c75/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 4b3fdf9..8d6b02d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -43,13 +43,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -103,37 +100,6 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
-  public void testRequestNNAfterOneSuccess() throws Exception {
-final AtomicInteger count = new AtomicInteger(0);
-final ClientProtocol goodMock = mock(ClientProtocol.class);
-when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-Thread.sleep(1000);
-return new long[]{1};
-  }
-});
-final ClientProtocol badMock = mock(ClientProtocol.class);
-when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-throw new IOException("Bad mock !!");
-  }
-});
-
-RequestHedgingProxyProvider provider =
-new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
-createFactory(badMock, goodMock, goodMock, badMock));
-ClientProtocol proxy = provider.getProxy().proxy;
-proxy.getStats();
-assertEquals(2, count.get());
-proxy.getStats();
-assertEquals(3, count.get());
-  }
-
-  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {


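For context on what is being reverted: request hedging fans a single call out to several namenode proxies and returns the first answer that succeeds. A minimal, self-contained sketch of that idea (not the HDFS implementation):

    import java.util.List;
    import java.util.concurrent.*;

    // Sketch: submit every candidate, return the first success, cancel the rest.
    static <T> T hedge(List<Callable<T>> candidates) throws Exception {
      ExecutorService pool = Executors.newFixedThreadPool(candidates.size());
      try {
        CompletionService<T> cs = new ExecutorCompletionService<>(pool);
        for (Callable<T> c : candidates) {
          cs.submit(c);
        }
        Exception last = null;
        for (int i = 0; i < candidates.size(); i++) {
          try {
            return cs.take().get();     // first successful result wins
          } catch (ExecutionException e) {
            last = e;                   // remember the failure, keep waiting
          }
        }
        throw last;                     // every candidate failed
      } finally {
        pool.shutdownNow();             // interrupt the losers
      }
    }

The reverted currentUsedProxy shortcut cached the winning proxy so later calls skipped the fan-out; the revert restores hedging on every invocation.
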

[28/44] hadoop git commit: MAPREDUCE-7069. Add ability to specify user environment variables individually. Contributed by Jim Brennan

2018-04-13 Thread xyao
MAPREDUCE-7069. Add ability to specify user environment variables individually. 
Contributed by Jim Brennan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4571351c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4571351c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4571351c

Branch: refs/heads/HDFS-7240
Commit: 4571351cccf6d4977469d3d623cf045b06a5f5f0
Parents: 6bb128d
Author: Jason Lowe 
Authored: Thu Apr 12 11:04:22 2018 -0500
Committer: Jason Lowe 
Committed: Thu Apr 12 11:12:46 2018 -0500

--
 .../apache/hadoop/mapred/MapReduceChildJVM.java |  73 +-
 .../v2/app/job/impl/TaskAttemptImpl.java|   8 +-
 .../v2/app/job/impl/TestMapReduceChildJVM.java  |  24 +++-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  10 ++
 .../java/org/apache/hadoop/mapred/JobConf.java  |  18 +++
 .../src/main/resources/mapred-default.xml   |  61 +++--
 .../src/site/markdown/MapReduceTutorial.md  |   6 +
 .../org/apache/hadoop/mapred/YARNRunner.java|  11 +-
 .../apache/hadoop/mapred/TestYARNRunner.java|  26 +++-
 .../java/org/apache/hadoop/yarn/util/Apps.java  | 115 +---
 .../org/apache/hadoop/yarn/util/TestApps.java   | 136 +++
 11 files changed, 407 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4571351c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index 936dc5a..d305f9f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.mapred;
 
 import java.net.InetSocketAddress;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Vector;
@@ -28,7 +27,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
@@ -42,50 +40,53 @@ public class MapReduceChildJVM {
 filter.toString();
   }
 
-  private static String getChildEnv(JobConf jobConf, boolean isMap) {
+  private static String getChildEnvProp(JobConf jobConf, boolean isMap) {
 if (isMap) {
-  return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV,
-  jobConf.get(JobConf.MAPRED_TASK_ENV));
+  return JobConf.MAPRED_MAP_TASK_ENV;
 }
-return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV,
-jobConf.get(JobConf.MAPRED_TASK_ENV));
+return JobConf.MAPRED_REDUCE_TASK_ENV;
+  }
+
+  private static String getChildEnvDefaultValue(JobConf jobConf) {
+// There is no default value for these - use the fallback value instead.
+return jobConf.get(JobConf.MAPRED_TASK_ENV);
   }
 
  public static void setVMEnv(Map<String, String> environment,
   Task task) {
 
 JobConf conf = task.conf;
-// Add the env variables passed by the user
-String mapredChildEnv = getChildEnv(conf, task.isMapTask());
-MRApps.setEnvFromInputString(environment, mapredChildEnv, conf);
-
-// Set logging level in the environment.
-// This is so that, if the child forks another "bin/hadoop" (common in
-// streaming) it will have the correct loglevel.
-environment.put(
-"HADOOP_ROOT_LOGGER", 
-MRApps.getChildLogLevel(conf, task.isMapTask()) + ",console");
-
-// TODO: The following is useful for instance in streaming tasks. Should be
-// set in ApplicationMaster's env by the RM.
-String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
-if (hadoopClientOpts == null) {
-  hadoopClientOpts = "";
-} else {
-  hadoopClientOpts = hadoopClientOpts + " ";
+boolean isMap = task.isMapTask();
+
+// Remove these before adding the user variables to prevent
+// MRApps.setEnvFromInputProperty() from appending to them.
+String hadoopRootLoggerKey = "HADOOP_ROOT_LOGGER";
+String hadoopClientOptsKey = "HADOOP_CLIENT_OPTS";
+environment.remove(hadoopRootLoggerKey

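The refactoring above is what allows each environment variable to be supplied through its own property rather than one comma-separated string. A short sketch of the job-side usage; the per-variable key suffix form shown is how MAPREDUCE-7069 is described, so treat the exact property names as assumptions:

    import org.apache.hadoop.mapred.JobConf;

    // Sketch: one property per variable sidesteps the comma-splitting pitfalls
    // of the single mapreduce.map.env=A=1,B=2 form.
    JobConf conf = new JobConf();
    conf.set("mapreduce.map.env.LD_LIBRARY_PATH", "/opt/native/lib");
    conf.set("mapreduce.map.env.CLASSPATH_EXTRAS", "a.jar,b.jar");  // commas now safe
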
[32/44] hadoop git commit: YARN-7936. Add default service AM Xmx. Contributed by Jian He

2018-04-13 Thread xyao
YARN-7936. Add default service AM Xmx. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53b3e594
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53b3e594
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53b3e594

Branch: refs/heads/HDFS-7240
Commit: 53b3e594732e7a567dda4e08b9a8af5f87a4472a
Parents: 1884459
Author: Billie Rinaldi 
Authored: Thu Apr 12 11:35:14 2018 -0700
Committer: Billie Rinaldi 
Committed: Thu Apr 12 12:38:00 2018 -0700

--
 .../apache/hadoop/yarn/service/client/ServiceClient.java| 9 +++--
 .../apache/hadoop/yarn/service/conf/YarnServiceConf.java| 2 ++
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b3e594/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 4c7b72d..21fb075 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -758,8 +758,13 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
   Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException {
 JavaCommandLineBuilder CLI = new JavaCommandLineBuilder();
 CLI.forceIPv4().headless();
-CLI.setJVMOpts(YarnServiceConf.get(YarnServiceConf.JVM_OPTS, null,
-app.getConfiguration(), conf));
+String jvmOpts = YarnServiceConf
+.get(YarnServiceConf.JVM_OPTS, "", app.getConfiguration(), conf);
+if (!jvmOpts.contains("-Xmx")) {
+  jvmOpts += DEFAULT_AM_JVM_XMX;
+}
+
+CLI.setJVMOpts(jvmOpts);
 if (hasSliderAMLog4j) {
   CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME);
   CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b3e594/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
index 14c4d15..3dd5a7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
@@ -38,6 +38,8 @@ public class YarnServiceConf {
   public static final String AM_RESOURCE_MEM = 
"yarn.service.am-resource.memory";
   public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
 
+  public static final String DEFAULT_AM_JVM_XMX = " -Xmx768m ";
+
   public static final String YARN_QUEUE = "yarn.service.queue";
 
   public static final String API_SERVER_ADDRESS = 
"yarn.service.api-server.address";

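The net effect of the change above: a service AM now starts with -Xmx768m unless the operator's own JVM opts already carry an -Xmx. A compressed restatement of the guard; the concrete configuration key behind YarnServiceConf.JVM_OPTS is not shown in this diff, so the string below is an assumption:

    // Sketch of the defaulting rule (configuration key name assumed, not verified).
    String jvmOpts = conf.get("yarn.service.am.java.opts", "");
    if (!jvmOpts.contains("-Xmx")) {
      jvmOpts += " -Xmx768m ";  // DEFAULT_AM_JVM_XMX
    }
    // Any operator-supplied -Xmx in these opts suppresses the default.
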




[36/44] hadoop git commit: YARN-8153. Guaranteed containers always stay in SCHEDULED on NM after restart. Contributed by Yang Wang.

2018-04-13 Thread xyao
YARN-8153. Guaranteed containers always stay in SCHEDULED on NM after restart. 
Contributed by Yang Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/226bedc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/226bedc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/226bedc0

Branch: refs/heads/HDFS-7240
Commit: 226bedc0239ba23f3ca0c40dac6aab3777d3ada6
Parents: 375654c
Author: Weiwei Yang 
Authored: Fri Apr 13 13:17:37 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Apr 13 13:17:37 2018 +0800

--
 .../scheduler/ContainerScheduler.java   |  7 ++-
 .../TestContainerManagerRecovery.java   | 51 
 2 files changed, 56 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/226bedc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index d9b713f..57368ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -501,8 +501,11 @@ public class ContainerScheduler extends AbstractService implements
 
   private void startContainer(Container container) {
 LOG.info("Starting container [" + container.getContainerId()+ "]");
-runningContainers.put(container.getContainerId(), container);
-this.utilizationTracker.addContainerResources(container);
+// Skip adding to runningContainers and utilization tracking when recovering
+if (!runningContainers.containsKey(container.getContainerId())) {
+  runningContainers.put(container.getContainerId(), container);
+  this.utilizationTracker.addContainerResources(container);
+}
 if (container.getContainerTokenIdentifier().getExecutionType() ==
 ExecutionType.OPPORTUNISTIC) {
   this.metrics.startOpportunisticContainer(container.getResource());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/226bedc0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
index cad835c..bf8b500 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -91,6 +92,7 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
 import 
org.apach

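The guard added above makes startContainer idempotent for the recovery path: a container already tracked after an NM restart must not be re-added, or its resources would be counted twice. The same intent can be written with the Java 8 Map.putIfAbsent idiom, shown here as a sketch:

    // Sketch: register the container exactly once; only the first registration
    // contributes to tracked utilization.
    if (runningContainers.putIfAbsent(container.getContainerId(), container) == null) {
      utilizationTracker.addContainerResources(container);
    }
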
[05/44] hadoop git commit: YARN-7973. Added ContainerRelaunch feature for Docker containers. Contributed by Shane Kumpf

2018-04-13 Thread xyao
YARN-7973. Added ContainerRelaunch feature for Docker containers.
   Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c467f311
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c467f311
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c467f311

Branch: refs/heads/HDFS-7240
Commit: c467f311d0c7155c09052d93fac12045af925583
Parents: 583fa6e
Author: Eric Yang 
Authored: Tue Apr 10 19:25:00 2018 -0400
Committer: Eric Yang 
Committed: Tue Apr 10 19:25:00 2018 -0400

--
 .../hadoop/yarn/api/ApplicationConstants.java   |  10 ++
 .../server/nodemanager/ContainerExecutor.java   |  11 ++
 .../nodemanager/DefaultContainerExecutor.java   |   6 +
 .../nodemanager/LinuxContainerExecutor.java | 124 +++
 .../launcher/ContainerLaunch.java   |  20 ++-
 .../launcher/ContainerRelaunch.java |   2 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |   6 +
 .../DelegatingLinuxContainerRuntime.java|   9 ++
 .../runtime/DockerLinuxContainerRuntime.java|  40 +-
 .../JavaSandboxLinuxContainerRuntime.java   |  10 ++
 .../runtime/docker/DockerCommandExecutor.java   |  14 +++
 .../runtime/docker/DockerStartCommand.java  |  29 +
 .../runtime/ContainerRuntime.java   |  10 ++
 .../impl/container-executor.c   |   2 +-
 .../container-executor/impl/utils/docker-util.c |  40 ++
 .../container-executor/impl/utils/docker-util.h |  11 ++
 .../test/utils/test_docker_util.cc  |  20 +++
 .../nodemanager/TestLinuxContainerExecutor.java |  12 ++
 .../launcher/TestContainerRelaunch.java |   2 +-
 .../runtime/TestDockerContainerRuntime.java |  90 ++
 .../runtime/docker/TestDockerStartCommand.java  |  53 
 .../TestContainersMonitorResourceChange.java|   5 +
 22 files changed, 470 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c467f311/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 64bcc44..38ad596 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -99,6 +99,16 @@ public interface ApplicationConstants {
   public static final String STDOUT = "stdout";
 
   /**
+   * The type of launch for the container.
+   */
+  @Public
+  @Unstable
+  enum ContainerLaunchType {
+LAUNCH,
+RELAUNCH
+  }
+
+  /**
* Environment for Applications.
*
* Some of the environment variables for applications are final

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c467f311/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index f566f48..8e335350 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -182,6 +182,17 @@ public abstract class ContainerExecutor implements Configurable {
   IOException, ConfigurationException;
 
   /**
+   * Relaunch the container on the node. This is a blocking call and returns
+   * only when the container exits.
+   * @param ctx Encapsulates information necessary for relaunching containers.
+   * @return the return status of the relaunch
+   * @throws IOException if the container relaunch fails
+   * @throws ConfigurationException if config error was found
+   */
+  public abstract int relaunchContainer(ContainerStartContext ctx) throws
+  IOException, ConfigurationException;
+
+  /**
* Signal container with the specified signal.
*
* @param ctx Encapsulates information necessary fo

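relaunchContainer(...) deliberately mirrors launchContainer's contract, so a runtime can reuse the container's existing on-disk and Docker state (a docker start of the stopped container instead of a fresh docker run). A minimal sketch of an executor satisfying the new abstract method; the delegation below is a naive placeholder, not how DockerLinuxContainerRuntime behaves:

    // Sketch: trivially satisfy the new contract by treating relaunch as launch.
    public class RelaunchAsLaunchExecutor extends DefaultContainerExecutor {
      @Override
      public int relaunchContainer(ContainerStartContext ctx)
          throws IOException, ConfigurationException {
        return launchContainer(ctx);  // naive: no container state is reused
      }
    }
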
[34/44] hadoop git commit: HADOOP-15379. Make IrqHandler.bind() public. Contributed by Ajay Kumar

2018-04-13 Thread xyao
HADOOP-15379. Make IrqHandler.bind() public. Contributed by Ajay Kumar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec1e8c1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec1e8c1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec1e8c1a

Branch: refs/heads/HDFS-7240
Commit: ec1e8c1a8ca3d01b82ca82d73ba1132b6625d659
Parents: 9b0b9f2
Author: Bharat Viswanadham 
Authored: Thu Apr 12 21:51:20 2018 -0700
Committer: Bharat Viswanadham 
Committed: Thu Apr 12 21:51:20 2018 -0700

--
 .../main/java/org/apache/hadoop/service/launcher/IrqHandler.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec1e8c1a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
index 30bb91c..17aa963 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java
@@ -86,7 +86,7 @@ public final class IrqHandler implements SignalHandler {
* Bind to the interrupt handler.
* @throws IllegalArgumentException if the exception could not be set
*/
-  void bind() {
+  public void bind() {
 Preconditions.checkState(signal == null, "Handler already bound");
 try {
   signal = new Signal(name);


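With bind() public, code outside org.apache.hadoop.service.launcher can register signal handlers directly. A usage sketch under the assumption that IrqHandler is constructed from a signal name plus an IrqHandler.Interrupted callback; only bind() itself is visible in this diff:

    // Hypothetical usage sketch -- constructor and callback shapes are assumed.
    IrqHandler sigterm = new IrqHandler("TERM", interruptData -> {
      // begin orderly shutdown on SIGTERM
    });
    sigterm.bind();  // the Preconditions.checkState above rejects a second bind()
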



[40/44] hadoop git commit: HDFS-13430. Fix TestEncryptionZonesWithKMS failure due to HADOOP-14445.

2018-04-13 Thread xyao
HDFS-13430. Fix TestEncryptionZonesWithKMS failure due to HADOOP-14445.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65035937
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65035937
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65035937

Branch: refs/heads/HDFS-7240
Commit: 650359371175fba416331e73aa03d2a96ccb90e5
Parents: fa8b88a
Author: Xiao Chen 
Authored: Fri Apr 13 09:04:52 2018 -0700
Committer: Xiao Chen 
Committed: Fri Apr 13 09:05:17 2018 -0700

--
 .../src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65035937/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 6f9ef29..51c6c4e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -110,6 +110,7 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
 import static org.junit.Assert.assertNotNull;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
@@ -192,6 +193,8 @@ public class TestEncryptionZones {
 // Lower the batch size for testing
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
 2);
+// disable kms client copy legacy token logic because it's irrelevant.
+conf.setBoolean(KMS_CLIENT_COPY_LEGACY_TOKEN_KEY, false);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
 cluster.waitActive();
 Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);





[20/44] hadoop git commit: HADOOP-12502 SetReplication OutOfMemoryError. Contributed by Vinayakumar B.

2018-04-13 Thread xyao
HADOOP-12502 SetReplication OutOfMemoryError. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d898b7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d898b7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d898b7b

Branch: refs/heads/HDFS-7240
Commit: 0d898b7bb8d8d616133236da979a4316be4c1a6f
Parents: 18de6f2
Author: Aaron Fabbri 
Authored: Wed Apr 11 17:19:56 2018 -0700
Committer: Aaron Fabbri 
Committed: Wed Apr 11 17:19:56 2018 -0700

--
 .../apache/hadoop/fs/ChecksumFileSystem.java|  9 ++-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  2 +-
 .../org/apache/hadoop/fs/shell/Command.java | 69 ++--
 .../apache/hadoop/fs/shell/CopyCommands.java|  6 ++
 .../java/org/apache/hadoop/fs/shell/Ls.java | 26 +++-
 .../org/apache/hadoop/fs/shell/PathData.java| 27 
 .../apache/hadoop/fs/shell/find/TestFind.java   | 34 +-
 7 files changed, 161 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d898b7b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 14c1905..663c910 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -677,7 +677,14 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   public FileStatus[] listStatus(Path f) throws IOException {
 return fs.listStatus(f, DEFAULT_FILTER);
   }
-  
+
+  @Override
+  public RemoteIterator<FileStatus> listStatusIterator(final Path p)
+  throws IOException {
+// Not-using fs#listStatusIterator() since it includes crc files as well
+return new DirListingIterator<>(p);
+  }
+
   /**
* List the statuses of the files/directories in the given path if the path 
is
* a directory.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d898b7b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index facfe03..707b921 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2147,7 +2147,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   /**
* Generic iterator for implementing {@link #listStatusIterator(Path)}.
*/
-  private class DirListingIterator<T extends FileStatus> implements
+  protected class DirListingIterator<T extends FileStatus> implements
      RemoteIterator<T> {
 
 private final Path path;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d898b7b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
index c292cf6..a4746cf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -325,14 +326,9 @@ abstract public class Command extends Configured {
*/
   protected void processPaths(PathData parent, PathData ... items)
   throws IOException {
-// TODO: this really should be iterative
 for (PathData item : items) {
   try {
-processPath(item);
-if (recursive && isPathRecursable(item)) {
-  recursePath(item);
-}
-postProcessPath(item);
+processPathInternal(item);
   } catch (IOException e) {
 displayError(e);
   }
@@ -340,6 +336,59 @@ abstract public class Command extends Configured {
   }
 
   /**
+   * Iterates over the 

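The OutOfMemoryError fixed here came from materializing entire directory listings as FileStatus[] arrays inside the shell commands; the patch moves them onto RemoteIterator-based streaming. A minimal sketch of the streaming pattern against any FileSystem:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    // Sketch: constant memory per entry instead of one giant FileStatus[].
    static void walk(FileSystem fs, Path dir) throws IOException {
      RemoteIterator<FileStatus> it = fs.listStatusIterator(dir);
      while (it.hasNext()) {
        FileStatus st = it.next();
        // process st; nothing here retains a reference to it
      }
    }
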
[33/44] hadoop git commit: HDFS-13436. Fix javadoc of package-info.java

2018-04-13 Thread xyao
HDFS-13436. Fix javadoc of package-info.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b0b9f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b0b9f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b0b9f2a

Branch: refs/heads/HDFS-7240
Commit: 9b0b9f2af2f6827d7430f995d3203c4cb7ef7e48
Parents: 53b3e59
Author: Akira Ajisaka 
Authored: Fri Apr 13 13:23:44 2018 +0900
Committer: Akira Ajisaka 
Committed: Fri Apr 13 13:23:51 2018 +0900

--
 .../hadoop/hdfs/protocol/datatransfer/package-info.java | 8 
 .../hdfs/server/common/blockaliasmap/package-info.java  | 9 +
 .../hdfs/server/diskbalancer/connectors/package-info.java   | 3 +--
 .../hdfs/server/diskbalancer/datamodel/package-info.java| 4 ++--
 .../hadoop/hdfs/server/diskbalancer/package-info.java   | 2 +-
 .../hdfs/server/diskbalancer/planner/package-info.java  | 2 +-
 6 files changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0b9f2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
index a13c7d8..13c0c59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
@@ -15,10 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.protocol.datatransfer;
-import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * This package contains classes related to hdfs data transfer protocol.
- */
\ No newline at end of file
+ */
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0b9f2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
index b906791..d088945 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/package-info.java
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-package org.apache.hadoop.hdfs.server.common.blockaliasmap;
 
 /**
  * The AliasMap defines mapping of PROVIDED HDFS blocks to data in remote
  * storage systems.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+package org.apache.hadoop.hdfs.server.common.blockaliasmap;
+
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0b9f2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
index b4b4437..f118c2e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
@@ -15,8 +15,6 @@
  * the License.
  */
 
-package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
-
 /**
  * Connectors package is a set of logical connectors that connect
  * to various data sources to read the hadoop cluster information.
@@ -35,3 +33,4 @@ package org.apache.hadoop.hdfs.server.diskbalancer.co

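The underlying rule this patch restores: in a package-info.java the package javadoc must immediately precede the package annotations and declaration, with imports following the declaration. A minimal well-formed template:

    /**
     * One-sentence description of what this package contains.
     */
    @InterfaceAudience.Private
    @InterfaceStability.Unstable
    package org.example.demo;

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;
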
[04/44] hadoop git commit: HADOOP-14445. Delegation tokens are not shared between KMS instances. Contributed by Xiao Chen and Rushabh S Shah.

2018-04-13 Thread xyao
HADOOP-14445. Delegation tokens are not shared between KMS instances. 
Contributed by Xiao Chen and Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/583fa6ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/583fa6ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/583fa6ed

Branch: refs/heads/HDFS-7240
Commit: 583fa6ed48ad3df40bcaa9c591d5ccd07ce3ea81
Parents: e813975
Author: Xiao Chen 
Authored: Tue Apr 10 15:26:33 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 15:38:25 2018 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   | 212 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../crypto/key/kms/TestKMSClientProvider.java   | 162 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 519 ---
 18 files changed, 1180 insertions(+), 203 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/583fa6ed/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 2eb2e21..f97fde7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -36,8 +36,9 @@ import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.hadoop.util.KMSUtil;
@@ -82,6 +83,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_COPY_LEGACY_TOKEN_DEFAULT;
 import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
 import static org.apache.hadoop.util.KMSUtil.checkNotNull;
 import static org.apache.hadoop.util.KMSUtil.parseJSONEncKeyVersion;
@@ -96,16 +99,13 @@ import static org.apache.hadoop.util.KMSUtil.parseJSONMetadata;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
-  private static final Logger LOG =
+  public static final Logger LOG =
   LoggerFactory.getLogger(KMSClientProvider.class);
 
   private static final String INVALID_SIGNATURE = "Invalid signature";
 
  private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = KMSDelegationToken.TOKEN_KIND_STR;
-  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
-
   public static final String SCHEME_NAME = "kms";
 
   private static final String UTF8 = "UTF-8";
@@ -133,12 +133,17 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
   private static final ObjectWriter WRITER =
   new ObjectMapper().writerWithDefaultPrettyPrinter();
 
+  /* dtService defines the token service value for the kms token.
+   * The value can be legacy format which is

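For orientation, the legacy-token copying this patch adds kicks in when a client fetches KMS delegation tokens into its credentials. A sketch of that client step through the delegation-token extension; the calls shown are the commonly used Hadoop API shapes, but treat exact signatures as assumptions:

    // Sketch: obtain KMS delegation tokens into a Credentials bag.
    KeyProviderDelegationTokenExtension ext =
        KeyProviderDelegationTokenExtension
            .createKeyProviderDelegationTokenExtension(keyProvider);
    Credentials creds = new Credentials();
    ext.addDelegationTokens("yarn", creds);  // renewer name is illustrative
    // With kms-dt plus the legacy kind both present, old and new KMS servers
    // can each find a token they recognize.
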
[31/44] hadoop git commit: YARN-8147. TestClientRMService#testGetApplications sporadically fails. Contributed by Jason Lowe

2018-04-13 Thread xyao
YARN-8147. TestClientRMService#testGetApplications sporadically fails. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18844599
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18844599
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18844599

Branch: refs/heads/HDFS-7240
Commit: 18844599aef42f79d2af4500aa2eee472dda95cb
Parents: 044341b
Author: Eric E Payne 
Authored: Thu Apr 12 17:53:57 2018 +
Committer: Eric E Payne 
Committed: Thu Apr 12 17:53:57 2018 +

--
 .../server/resourcemanager/TestClientRMService.java   | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18844599/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 1c50dd3..d66a866 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -1064,7 +1064,7 @@ public class TestClientRMService {
   }
 
   @Test
-  public void testGetApplications() throws IOException, YarnException {
+  public void testGetApplications() throws Exception {
 /**
  * 1. Submit 3 applications alternately in two queues
  * 2. Test each of the filters
@@ -1113,8 +1113,12 @@ public class TestClientRMService {
   SubmitApplicationRequest submitRequest = mockSubmitAppRequest(
   appId, appNames[i], queues[i % queues.length],
          new HashSet<String>(tags.subList(0, i + 1)));
+  // make sure each app is submitted at a different time
+  Thread.sleep(1);
   rmService.submitApplication(submitRequest);
-  submitTimeMillis[i] = System.currentTimeMillis();
+  submitTimeMillis[i] = rmService.getApplicationReport(
+  GetApplicationReportRequest.newInstance(appId))
+  .getApplicationReport().getStartTime();
 }
 
 // Test different cases of ClientRMService#getApplications()
@@ -1129,19 +1133,19 @@ public class TestClientRMService {
 
 // Check start range
 request = GetApplicationsRequest.newInstance();
-request.setStartRange(submitTimeMillis[0], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[0] + 1, System.currentTimeMillis());
 
 // 2 applications are submitted after first timeMills
 assertEquals("Incorrect number of matching start range", 
 2, rmService.getApplications(request).getApplicationList().size());
 
 // 1 application is submitted after the second timeMills
-request.setStartRange(submitTimeMillis[1], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[1] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 1, rmService.getApplications(request).getApplicationList().size());
 
 // no application is submitted after the third timeMills
-request.setStartRange(submitTimeMillis[2], System.currentTimeMillis());
+request.setStartRange(submitTimeMillis[2] + 1, System.currentTimeMillis());
 assertEquals("Incorrect number of matching start range", 
 0, rmService.getApplications(request).getApplicationList().size());
 





[11/44] hadoop git commit: HDFS-13056. Expose file-level composite CRCs in HDFS which are comparable across different instances/layouts. Contributed by Dennis Huo.

2018-04-13 Thread xyao
HDFS-13056. Expose file-level composite CRCs in HDFS which are comparable 
across different instances/layouts. Contributed by Dennis Huo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c9cdad6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c9cdad6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c9cdad6

Branch: refs/heads/HDFS-7240
Commit: 7c9cdad6d04c98db5a83e2108219bf6e6c903daf
Parents: 6cc59a0
Author: Xiao Chen 
Authored: Tue Apr 10 20:56:07 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 21:31:48 2018 -0700

--
 .../hadoop/fs/CompositeCrcFileChecksum.java |  82 +
 .../main/java/org/apache/hadoop/fs/Options.java |  11 +
 .../org/apache/hadoop/util/CrcComposer.java | 187 ++
 .../java/org/apache/hadoop/util/CrcUtil.java| 220 +++
 .../org/apache/hadoop/util/DataChecksum.java|  18 +
 .../org/apache/hadoop/util/TestCrcComposer.java | 242 
 .../org/apache/hadoop/util/TestCrcUtil.java | 232 
 .../main/java/org/apache/hadoop/fs/Hdfs.java|   4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  56 ++-
 .../hadoop/hdfs/DistributedFileSystem.java  |   5 +-
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 365 +--
 .../hdfs/client/HdfsClientConfigKeys.java   |   2 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  27 ++
 .../hdfs/protocol/BlockChecksumOptions.java |  54 +++
 .../hadoop/hdfs/protocol/BlockChecksumType.java |  30 ++
 .../datatransfer/DataTransferProtocol.java  |  12 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  11 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  44 +++
 .../src/main/proto/datatransfer.proto   |   7 +-
 .../src/main/proto/hdfs.proto   |  21 ++
 .../hdfs/protocol/datatransfer/Receiver.java|   8 +-
 .../server/datanode/BlockChecksumHelper.java| 289 +--
 .../hdfs/server/datanode/DataXceiver.java   |  26 +-
 ...dBlockChecksumCompositeCrcReconstructor.java |  80 
 ...StripedBlockChecksumMd5CrcReconstructor.java |  74 
 .../StripedBlockChecksumReconstructor.java  |  66 ++--
 .../erasurecode/StripedBlockReconstructor.java  |   1 +
 .../src/main/resources/hdfs-default.xml |  11 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  31 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java| 101 -
 .../hdfs/TestFileChecksumCompositeCrc.java  |  47 +++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  14 +
 .../hadoop/tools/mapred/TestCopyMapper.java | 173 +++--
 .../mapred/TestCopyMapperCompositeCrc.java  |  50 +++
 34 files changed, 2359 insertions(+), 242 deletions(-)
--
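
A hedged usage sketch of what the feature enables; the combine-mode key comes
from this patch's hdfs-default.xml addition, so treat the exact name and value
as assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CompositeCrcDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed config key: switch from the default (layout-dependent)
        // MD5-of-MD5s mode to composite CRCs.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        FileSystem fs = FileSystem.get(conf);
        FileChecksum a = fs.getFileChecksum(new Path("/replicated/file"));
        FileChecksum b = fs.getFileChecksum(new Path("/ec/file"));
        // Composite CRCs depend only on the file bytes, not on block or
        // chunk layout, so equal checksums imply identical contents even
        // across replicated and erasure-coded copies.
        System.out.println("same contents: " + a.equals(b));
      }
    }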


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c9cdad6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
new file mode 100644
index 000..e1ed5cb
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CompositeCrcFileChecksum.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.util.CrcUtil;
+import org.apache.hadoop.util.DataChecksum;
+
+/** Composite CRC. */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
+public class CompositeCrcFileChecksum extends FileChecksum {
+  public static final int LENGTH = Integer.SIZE / Byte.SIZE;
+
+  private int crc;
+  private DataChecksum.Type crcType;
+

[44/44] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-04-13 Thread xyao
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72a3743c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72a3743c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72a3743c

Branch: refs/heads/HDFS-7240
Commit: 72a3743cc49d9c7b8d2eaec8064a25b8d890c267
Parents: 66610b5 995cba6
Author: Xiaoyu Yao 
Authored: Fri Apr 13 17:00:19 2018 -0700
Committer: Xiaoyu Yao 
Committed: Fri Apr 13 17:00:19 2018 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   |  11 +-
 .../crypto/key/kms/KMSClientProvider.java   | 212 
 .../crypto/key/kms/KMSDelegationToken.java  |  22 +-
 .../crypto/key/kms/KMSLegacyTokenRenewer.java   |  56 ++
 .../hadoop/crypto/key/kms/KMSTokenRenewer.java  | 103 
 .../hadoop/crypto/key/kms/package-info.java |  18 +
 .../apache/hadoop/fs/ChecksumFileSystem.java|   9 +-
 .../fs/CommonConfigurationKeysPublic.java   |  10 +
 .../hadoop/fs/CompositeCrcFileChecksum.java |  82 +++
 .../java/org/apache/hadoop/fs/FileSystem.java   |   2 +-
 .../main/java/org/apache/hadoop/fs/Options.java |  11 +
 .../org/apache/hadoop/fs/shell/Command.java |  69 ++-
 .../apache/hadoop/fs/shell/CopyCommands.java|   6 +
 .../java/org/apache/hadoop/fs/shell/Ls.java |  26 +-
 .../org/apache/hadoop/fs/shell/PathData.java|  27 +
 .../web/DelegationTokenAuthenticatedURL.java|  21 +-
 .../DelegationTokenAuthenticationHandler.java   |   8 +-
 .../web/DelegationTokenAuthenticator.java   |   2 +-
 .../hadoop/service/launcher/IrqHandler.java |   2 +-
 .../org/apache/hadoop/util/CrcComposer.java | 187 +++
 .../java/org/apache/hadoop/util/CrcUtil.java| 220 
 .../org/apache/hadoop/util/DataChecksum.java|  18 +
 .../java/org/apache/hadoop/util/KMSUtil.java|  45 +-
 .../hadoop/util/KMSUtilFaultInjector.java   |  49 ++
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 ...rg.apache.hadoop.security.token.TokenRenewer |   3 +-
 .../src/main/resources/core-default.xml |  20 +
 .../apache/hadoop/conf/TestConfiguration.java   |  26 +-
 .../crypto/key/kms/TestKMSClientProvider.java   | 162 ++
 .../kms/TestLoadBalancingKMSClientProvider.java |  67 ++-
 .../apache/hadoop/fs/shell/find/TestFind.java   |  34 +-
 .../org/apache/hadoop/util/TestCrcComposer.java | 242 +
 .../org/apache/hadoop/util/TestCrcUtil.java | 232 +
 .../org/apache/hadoop/util/TestKMSUtil.java |  65 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 519 ---
 .../main/java/org/apache/hadoop/fs/Hdfs.java|   4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  56 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |   5 +-
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 365 -
 .../hdfs/client/HdfsClientConfigKeys.java   |   2 +
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  27 +
 .../hdfs/protocol/BlockChecksumOptions.java |  54 ++
 .../hadoop/hdfs/protocol/BlockChecksumType.java |  30 ++
 .../datatransfer/DataTransferProtocol.java  |  12 +-
 .../hdfs/protocol/datatransfer/Sender.java  |  11 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  44 ++
 .../ha/RequestHedgingProxyProvider.java |   3 -
 .../hdfs/shortcircuit/ShortCircuitCache.java|  11 +-
 .../src/main/proto/datatransfer.proto   |   7 +-
 .../src/main/proto/hdfs.proto   |  21 +
 .../ha/TestRequestHedgingProxyProvider.java |  34 --
 .../native/libhdfspp/include/hdfspp/hdfspp.h|  53 +-
 .../native/libhdfspp/include/hdfspp/ioservice.h | 140 +
 .../native/libhdfspp/lib/bindings/c/hdfs.cc |   7 +-
 .../native/libhdfspp/lib/common/CMakeLists.txt  |   2 +-
 .../native/libhdfspp/lib/common/async_stream.h  |  13 +-
 .../libhdfspp/lib/common/continuation/asio.h|   5 -
 .../libhdfspp/lib/common/hdfs_ioservice.cc  | 146 --
 .../libhdfspp/lib/common/hdfs_ioservice.h   |  79 ---
 .../libhdfspp/lib/common/ioservice_impl.cc  | 159 ++
 .../libhdfspp/lib/common/ioservice_impl.h   |  76 +++
 .../main/native/libhdfspp/lib/common/logging.h  |   3 -
 .../libhdfspp/lib/common/namenode_info.cc   |  15 +-
 .../native/libhdfspp/lib/common/namenode_info.h |   8 +-
 .../main/native/libhdfspp/lib/common/util.cc|  14 +-
 .../src/main/native/libhdfspp/lib/common/util.h |  25 +-
 .../lib/connection/datanodeconnection.cc|  27 +-
 .../lib/connection/datanodeconnection.h |  26 +-
 .../main/native/libhdfspp/lib/fs/filehandle.cc  |  18 +-
 .../main/native/libhdfspp/lib/fs/filehandle.h   |  12 +-
 .../main/native/libhdfspp/lib/fs/filesystem.cc  |  67 ++-
 .../main/native/libhdfspp/lib/fs/filesystem.h   |  66 +--
 .../libhdfspp/lib/fs/namenode_operations.h  |   4 +-
 .../native/libhdfspp/lib/reader/block_reader.cc |  18 +-
 .../native/libhdfspp/lib/reader/bl

[42/44] hadoop git commit: YARN-8142. Improve SIGTERM handling for YARN Service Application Master. Contributed by Billie Rinaldi

2018-04-13 Thread xyao
YARN-8142.  Improve SIGTERM handling for YARN Service Application Master.
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9031a76d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9031a76d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9031a76d

Branch: refs/heads/HDFS-7240
Commit: 9031a76d447f0c5eaa392144fd17c5b9812e1b20
Parents: e66e287
Author: Eric Yang 
Authored: Fri Apr 13 15:34:33 2018 -0400
Committer: Eric Yang 
Committed: Fri Apr 13 15:34:33 2018 -0400

--
 .../hadoop/yarn/service/ClientAMService.java|  1 +
 .../hadoop/yarn/service/ServiceScheduler.java   | 41 +++
 .../hadoop/yarn/service/ServiceTestUtils.java   | 11 +++
 .../yarn/service/TestYarnNativeServices.java| 71 
 4 files changed, 110 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9031a76d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
index 08c36f4..3d037e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java
@@ -125,6 +125,7 @@ public class ClientAMService extends AbstractService
 LOG.info("Stop the service by {}", UserGroupInformation.getCurrentUser());
 context.scheduler.getDiagnostics()
 .append("Stopped by user " + UserGroupInformation.getCurrentUser());
+context.scheduler.setGracefulStop();
 
// Stop the service after a 2 second delay to make sure this rpc call is completed.
 // shutdown hook will be executed which will stop AM gracefully.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9031a76d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 0fcca16..7eddef9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -156,6 +156,8 @@ public class ServiceScheduler extends CompositeService {
   // requests for a single service is not recommended.
   private boolean hasAtLeastOnePlacementConstraint;
 
+  private boolean gracefulStop = false;
+
   public ServiceScheduler(ServiceContext context) {
 super(context.service.getName());
 this.context = context;
@@ -199,6 +201,7 @@ public class ServiceScheduler extends CompositeService {
 addIfService(amRMClient);
 
 nmClient = createNMClient();
+nmClient.getClient().cleanupRunningContainersOnStop(false);
 addIfService(nmClient);
 
dispatcher = new AsyncDispatcher("Component dispatcher");
@@ -252,6 +255,11 @@ public class ServiceScheduler extends CompositeService {
 .createAMRMClientAsync(1000, new AMRMClientCallback());
   }
 
+  protected void setGracefulStop() {
+this.gracefulStop = true;
+nmClient.getClient().cleanupRunningContainersOnStop(true);
+  }
+
   @Override
   public void serviceInit(Configuration conf) throws Exception {
 try {
@@ -266,26 +274,31 @@ public class ServiceScheduler extends CompositeService {
   public void serviceStop() throws Exception {
 LOG.info("Stopping service scheduler");
 
-// Mark component-instances/containers as STOPPED
-if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
-  for (ContainerId containerId : getLiveInsta

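The gist of the change, as a hedged distillation (the NMClient calls are real
API, the wiring around them is condensed): container cleanup on AM stop
becomes opt-in, enabled only when the stop was user-initiated rather than a
SIGTERM-driven exit.

    // In serviceInit(): by default, leave the service's containers
    // running when the AM stops, so an AM restart can recover them.
    nmClient.getClient().cleanupRunningContainersOnStop(false);

    // Called from ClientAMService on an explicit user stop: only a
    // deliberate stop should also tear down the containers.
    protected void setGracefulStop() {
      this.gracefulStop = true;
      nmClient.getClient().cleanupRunningContainersOnStop(true);
    }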
[13/44] hadoop git commit: YARN-8127. Resource leak when async scheduling is enabled. Contributed by Tao Yang.

2018-04-13 Thread xyao
YARN-8127. Resource leak when async scheduling is enabled. Contributed by Tao 
Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7eb783e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7eb783e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7eb783e2

Branch: refs/heads/HDFS-7240
Commit: 7eb783e2634d8c11fb646f1f2fdf597336325312
Parents: b0aff8a
Author: Weiwei Yang 
Authored: Wed Apr 11 17:15:25 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Apr 11 17:15:25 2018 +0800

--
 .../scheduler/common/fica/FiCaSchedulerApp.java | 10 +++
 .../TestCapacitySchedulerAsyncScheduling.java   | 91 
 2 files changed, 101 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7eb783e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 32b2cad..3ec8191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -339,6 +339,16 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
 return false;
   }
 }
+// If allocate from reserved container, make sure node is still reserved
+if (allocation.getAllocateFromReservedContainer() != null
+&& reservedContainerOnNode == null) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Try to allocate from reserved container " + allocation
+.getAllocateFromReservedContainer().getRmContainer()
+.getContainerId() + ", but node is not reserved");
+  }
+  return false;
+}
 
 // Do we have enough space on this node?
 Resource availableResource = Resources.clone(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7eb783e2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index 18cd942..338b9f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -594,6 +594,97 @@ public class TestCapacitySchedulerAsyncScheduling {
 }
   }
 
+  // Testcase for YARN-8127
+  @Test (timeout = 30000)
+  public void testCommitDuplicatedAllocateFromReservedProposals()
+  throws Exception {
+// disable async-scheduling for simulating complex scene
+Configuration disableAsyncConf = new Configuration(conf);
+disableAsyncConf.setBoolean(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false);
+
+// init RM & NMs
+final MockRM rm = new MockRM(disableAsyncConf);
+rm.start();
+final MockNM nm1 = rm.registerNode("192.168.0.1:1234", 8 * GB);
+rm.registerNode("192.168.0.2:2234", 8 * GB);
+
+// init scheduler & nodes
+while (
+((CapacityScheduler) rm.getRMContext().getScheduler()).getNodeTracker()
+.nodeCount() < 2) {
+  Thread.sleep(10);
+}
+Assert.assertEquals(2,
+((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+.getNodeTracker().nodeCount());
+CapacityScheduler cs 
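
In short, a hedged reading of the FiCaSchedulerApp hunk above: asynchronous
scheduling builds allocation proposals optimistically and commits them later,
so every assumption a proposal was built on must be re-validated at commit
time. The added guard rejects a proposal whose underlying reservation has
disappeared in the meantime:

    // Condensed from the hunk above: without this check, two threads
    // could both accept an allocate-from-reserved proposal for the same
    // reservation, and the duplicate allocation's resources would leak.
    if (allocation.getAllocateFromReservedContainer() != null
        && reservedContainerOnNode == null) {
      return false; // reservation is gone; reject the stale proposal
    }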

[37/44] hadoop git commit: HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. Contributed by Erik Krogen

2018-04-13 Thread xyao
HADOOP-14970. MiniHadoopClusterManager doesn't respect lack of format option. 
Contributed by Erik Krogen

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a407bc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a407bc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a407bc9

Branch: refs/heads/HDFS-7240
Commit: 1a407bc9906306801690bc75ff0f0456f8f265fd
Parents: 226bedc
Author: Erik Krogen 
Authored: Thu Apr 12 23:27:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Thu Apr 12 23:27:51 2018 -0700

--
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a407bc9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index b89cdc0..d29dd34 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -156,6 +156,7 @@ public class MiniHadoopClusterManager {
 if (!noDFS) {
   dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
   .nameNodeHttpPort(nnHttpPort).numDataNodes(numDataNodes)
+  .format(dfsOpts == StartupOption.FORMAT)
   .startupOption(dfsOpts).build();
   LOG.info("Started MiniDFSCluster -- namenode on port "
   + dfs.getNameNodePort());
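
A hedged note on the one-liner: MiniDFSCluster.Builder defaults to
format=true, so before this patch an existing name/data directory was
reformatted even when the user did not pass -format. Tying the builder flag
to the startup option (builder methods are real API; conf and ports assumed)
makes -format the only path that wipes state:

    MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
        .format(dfsOpts == StartupOption.FORMAT) // only format on -format
        .startupOption(dfsOpts)
        .build();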


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/44] hadoop git commit: HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor Bota.

2018-04-13 Thread xyao
HDFS-13330. ShortCircuitCache#fetchOrCreate never retries. Contributed by Gabor 
Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e66e287e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e66e287e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e66e287e

Branch: refs/heads/HDFS-7240
Commit: e66e287efe2b43e710137a628f03c7df3ebdf498
Parents: 6503593
Author: Wei-Chiu Chuang 
Authored: Fri Apr 13 09:17:34 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Apr 13 09:17:34 2018 -0700

--
 .../hdfs/shortcircuit/ShortCircuitCache.java| 11 ++---
 .../shortcircuit/TestShortCircuitCache.java | 26 
 2 files changed, 33 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e66e287e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index b26652b..c2f0350 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -664,6 +664,7 @@ public class ShortCircuitCache implements Closeable {
 unref(replica);
   }
 
+  static final int FETCH_OR_CREATE_RETRY_TIMES = 3;
   /**
* Fetch or create a replica.
*
@@ -678,11 +679,11 @@ public class ShortCircuitCache implements Closeable {
*/
   public ShortCircuitReplicaInfo fetchOrCreate(ExtendedBlockId key,
   ShortCircuitReplicaCreator creator) {
-Waitable newWaitable = null;
+Waitable newWaitable;
 lock.lock();
 try {
   ShortCircuitReplicaInfo info = null;
-  do {
+  for (int i = 0; i < FETCH_OR_CREATE_RETRY_TIMES; i++){
 if (closed) {
LOG.trace("{}: can't fetchOrCreate {} because the cache is closed.",
   this, key);
@@ -692,11 +693,12 @@ public class ShortCircuitCache implements Closeable {
 if (waitable != null) {
   try {
 info = fetch(key, waitable);
+break;
   } catch (RetriableException e) {
 LOG.debug("{}: retrying {}", this, e.getMessage());
   }
 }
-  } while (false);
+  }
   if (info != null) return info;
   // We need to load the replica ourselves.
   newWaitable = new Waitable<>(lock.newCondition());
@@ -717,7 +719,8 @@ public class ShortCircuitCache implements Closeable {
*
* @throws RetriableException   If the caller needs to retry.
*/
-  private ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
+  @VisibleForTesting // ONLY for testing
+  protected ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
   Waitable waitable) throws RetriableException {
 // Another thread is already in the process of loading this
 // ShortCircuitReplica.  So we simply wait for it to complete.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e66e287e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7ba0edc..5da6a25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.ShortCircuitReplica
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -793,4 +794,29 @@ public class TestShortCircuitCache {
 cluster.shutdown();
 sockDir.close();
   }
+
+  @Test
+  public void testFetchOrCreateRetries() throws Exception {
+try(ShortCircuitCache cache = Mockito
+.spy(new ShortCircuitCache(10, 1000, 10, 1000, 1, 1, 0))) {
+  final TestFileDescriptorPair pair

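A hedged distillation of the core fix, with identifiers from the hunks above
and the closed/lookup steps elided: the old `do { ... } while (false)` ran the
body exactly once, so a RetriableException was logged and then silently
dropped. The bounded for-loop makes the retry real.

    ShortCircuitReplicaInfo info = null;
    for (int i = 0; i < FETCH_OR_CREATE_RETRY_TIMES; i++) {
      try {
        info = fetch(key, waitable); // waits for another thread's load
        break;                       // loaded successfully: stop retrying
      } catch (RetriableException e) {
        LOG.debug("{}: retrying {}", this, e.getMessage());
      }
    }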
[39/44] hadoop git commit: HDFS-13438. Fix javadoc in FsVolumeList#removeVolume. Contributed by Shashikant Banerjee.

2018-04-13 Thread xyao
HDFS-13438. Fix javadoc in FsVolumeList#removeVolume. Contributed by Shashikant 
Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa8b88ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa8b88ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa8b88ab

Branch: refs/heads/HDFS-7240
Commit: fa8b88ab2b272b29cf116a5de038d78fc4357b9d
Parents: 0725953
Author: Bharat Viswanadham 
Authored: Fri Apr 13 08:56:02 2018 -0700
Committer: Bharat Viswanadham 
Committed: Fri Apr 13 08:56:02 2018 -0700

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa8b88ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 75baf84..8f52ea7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -335,7 +335,7 @@ class FsVolumeList {
 
   /**
* Dynamically remove volume in the list.
-   * @param volume the volume to be removed.
+   * @param storageLocation {@link StorageLocation} of the volume to be 
removed.
* @param clearFailure set true to remove failure info for this volume.
*/
   void removeVolume(StorageLocation storageLocation, boolean clearFailure) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/44] hadoop git commit: HDFS-13426. Fix javadoc in FsDatasetAsyncDiskService#removeVolume. Contributed by Shashikant Banerjee.

2018-04-13 Thread xyao
HDFS-13426. Fix javadoc in FsDatasetAsyncDiskService#removeVolume. Contributed 
by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ed8511a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ed8511a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ed8511a

Branch: refs/heads/HDFS-7240
Commit: 7ed8511ad8daff19f765e78e4dca07cdebc2c2b2
Parents: b859785
Author: Mukul Kumar Singh 
Authored: Thu Apr 12 20:12:31 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Apr 12 20:12:31 2018 +0530

--
 .../server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ed8511a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index 9174cb0..4929b5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -137,7 +138,7 @@ class FsDatasetAsyncDiskService {
 
   /**
* Stops AsyncDiskService for a volume.
-   * @param volume the root of the volume.
+   * @param storageId id of {@link StorageDirectory}.
*/
   synchronized void removeVolume(String storageId) {
 if (executors == null) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/44] hadoop git commit: YARN-7984. Improved YARN service stop/destroy and clean up. Contributed by Billie Rinaldi

2018-04-13 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 66610b5fd -> 72a3743cc


YARN-7984. Improved YARN service stop/destroy and clean up.
   Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5537990
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5537990
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5537990

Branch: refs/heads/HDFS-7240
Commit: d553799030a5a64df328319aceb35734d0b2de20
Parents: 8ab776d
Author: Eric Yang 
Authored: Tue Apr 10 17:40:49 2018 -0400
Committer: Eric Yang 
Committed: Tue Apr 10 17:40:49 2018 -0400

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 47 +++---
 .../hadoop/yarn/service/ServiceClientTest.java  |  6 ++
 .../hadoop/yarn/service/TestApiServer.java  | 26 ++
 .../yarn/service/client/ServiceClient.java  | 93 +++-
 .../hadoop/yarn/service/ServiceTestUtils.java   | 15 +++-
 .../yarn/service/TestYarnNativeServices.java| 42 -
 6 files changed, 191 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5537990/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 59ee05d..14c77f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -231,30 +231,40 @@ public class ApiServer {
   e.getCause().getMessage());
 } catch (YarnException | FileNotFoundException e) {
   return formatResponse(Status.NOT_FOUND, e.getMessage());
-} catch (IOException | InterruptedException e) {
+} catch (Exception e) {
   LOG.error("Fail to stop service: {}", e);
   return formatResponse(Status.INTERNAL_SERVER_ERROR, e.getMessage());
 }
   }
 
   private Response stopService(String appName, boolean destroy,
-  final UserGroupInformation ugi) throws IOException,
-  InterruptedException, YarnException, FileNotFoundException {
+  final UserGroupInformation ugi) throws Exception {
 int result = ugi.doAs(new PrivilegedExceptionAction() {
   @Override
-  public Integer run() throws IOException, YarnException,
-  FileNotFoundException {
+  public Integer run() throws Exception {
 int result = 0;
 ServiceClient sc = getServiceClient();
 sc.init(YARN_CONFIG);
 sc.start();
-result = sc.actionStop(appName, destroy);
-if (result == EXIT_SUCCESS) {
-  LOG.info("Successfully stopped service {}", appName);
+Exception stopException = null;
+try {
+  result = sc.actionStop(appName, destroy);
+  if (result == EXIT_SUCCESS) {
+LOG.info("Successfully stopped service {}", appName);
+  }
+} catch (Exception e) {
+  LOG.info("Got exception stopping service", e);
+  stopException = e;
 }
 if (destroy) {
   result = sc.actionDestroy(appName);
-  LOG.info("Successfully deleted service {}", appName);
+  if (result == EXIT_SUCCESS) {
+LOG.info("Successfully deleted service {}", appName);
+  }
+} else {
+  if (stopException != null) {
+throw stopException;
+  }
 }
 sc.close();
 return result;
@@ -262,8 +272,21 @@ public class ApiServer {
 });
 ServiceStatus serviceStatus = new ServiceStatus();
 if (destroy) {
-  serviceStatus.setDiagnostics("Successfully destroyed service " +
-  appName);
+  if (result == EXIT_SUCCESS) {
+serviceStatus.setDiagnostics("Successfully destroyed service " +
+appName);
+  } else {
+if (result == EXIT_NOT_FOUND) {
+  serviceStatus
+  .setDiagnostics("Service " + appName + " doesn't exist");
+  return formatResponse(Status.BAD_REQUEST, serviceStatus);
+} else {
+  serviceStatus
+  .setDiagnostics("Service " + appName + " error cleaning up " +
+  "registry");
+  return formatResponse(Status.INTERNAL_SERVER_ERROR, serviceStatus);
+}
+  }
 } else {
  
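Worth calling out in this hunk: the stop exception is deferred so that both
stop and destroy get a chance to run, destroy's exit code wins when both were
requested, and a stop failure only propagates for a plain stop. The doAs
wrapper itself is the standard UserGroupInformation API; a hedged minimal
sketch (names as in the hunk, error handling trimmed):

    int result = ugi.doAs((PrivilegedExceptionAction<Integer>) () -> {
      ServiceClient sc = getServiceClient();
      sc.init(YARN_CONFIG);
      sc.start();
      try {
        return sc.actionStop(appName, destroy);
      } finally {
        sc.close();
      }
    });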

[21/44] hadoop git commit: HDFS-13427. Fix the section titles of transparent encryption document.

2018-04-13 Thread xyao
HDFS-13427. Fix the section titles of transparent encryption document.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7cd362a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7cd362a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7cd362a

Branch: refs/heads/HDFS-7240
Commit: c7cd362afd21add324c3a82c594b133d41cf8d03
Parents: 0d898b7
Author: Akira Ajisaka 
Authored: Thu Apr 12 10:44:56 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 10:44:56 2018 +0900

--
 .../src/site/markdown/TransparentEncryption.md  | 57 ++--
 1 file changed, 29 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7cd362a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
index 3454265..d7a70b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/TransparentEncryption.md
@@ -17,12 +17,12 @@ Transparent Encryption in HDFS
 
 
 
-Overview
+Overview
 
 
 HDFS implements *transparent*, *end-to-end* encryption. Once configured, data 
read from and written to special HDFS directories is *transparently* encrypted 
and decrypted without requiring changes to user application code. This 
encryption is also *end-to-end*, which means the data can only be encrypted and 
decrypted by the client. HDFS never stores or has access to unencrypted data or 
unencrypted data encryption keys. This satisfies two typical requirements for 
encryption: *at-rest encryption* (meaning data on persistent media, such as a 
disk) as well as *in-transit encryption* (e.g. when data is travelling over the 
network).
 
-Background
+Background
 --
 
 Encryption can be done at different layers in a traditional data management 
software/hardware stack. Choosing to encrypt at a given layer comes with 
different advantages and disadvantages.
@@ -39,17 +39,17 @@ HDFS-level encryption fits between database-level and 
filesystem-level encryptio
 
 HDFS-level encryption also prevents attacks at the filesystem-level and below 
(so-called "OS-level attacks"). The operating system and disk only interact 
with encrypted bytes, since the data is already encrypted by HDFS.
 
-Use Cases
+Use Cases
 -
 
 Data encryption is required by a number of different government, financial, 
and regulatory entities. For example, the health-care industry has HIPAA 
regulations, the card payment industry has PCI DSS regulations, and the US 
government has FISMA regulations. Having transparent encryption built into HDFS 
makes it easier for organizations to comply with these regulations.
 
 Encryption can also be performed at the application-level, but by integrating 
it into HDFS, existing applications can operate on encrypted data without 
changes. This integrated architecture implies stronger encrypted file semantics 
and better coordination with other HDFS functions.
 
-Architecture
+Architecture
 
 
-### Overview
+### Overview
 
 For transparent encryption, we introduce a new abstraction to HDFS: the 
*encryption zone*. An encryption zone is a special directory whose contents 
will be transparently encrypted upon write and transparently decrypted upon 
read. Each encryption zone is associated with a single *encryption zone key* 
which is specified when the zone is created. Each file within an encryption 
zone has its own unique *data encryption key (DEK)*. DEKs are never handled 
directly by HDFS. Instead, HDFS only ever handles an *encrypted data encryption 
key (EDEK)*. Clients decrypt an EDEK, and then use the subsequent DEK to read 
and write data. HDFS datanodes simply see a stream of encrypted bytes.
 
@@ -65,7 +65,7 @@ A new cluster service is required to manage encryption keys: 
the Hadoop Key Mana
 
 The KMS will be described in more detail below.
 
-### Accessing data 
within an encryption zone
+### Accessing data within an encryption zone
 
 When creating a new file in an encryption zone, the NameNode asks the KMS to 
generate a new EDEK encrypted with the encryption zone's key. The EDEK is then 
stored persistently as part of the file's metadata on the NameNode.
 
@@ -75,7 +75,7 @@ All of the above steps for the read and write path happen 
automatically through
 
 Access to encrypted file data and metadata is controlled by normal HDFS 
filesystem permissions. This means that if HDFS is compromised (for example, by 
gaining unauthorized access to an HDFS superuser account), a malicious user 
only gains access to ciphe

[24/44] hadoop git commit: YARN-7931. [atsv2 read acls] Include domain table creation as part of schema creator. (Vrushali C via Haibo Chen)

2018-04-13 Thread xyao
YARN-7931. [atsv2 read acls] Include domain table creation as part of schema 
creator. (Vrushali C via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8597858
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8597858
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8597858

Branch: refs/heads/HDFS-7240
Commit: b8597858b17e40a99611e3a384cdd241293af83f
Parents: 113af12
Author: Haibo Chen 
Authored: Thu Apr 12 06:38:30 2018 -0700
Committer: Haibo Chen 
Committed: Thu Apr 12 06:38:30 2018 -0700

--
 .../storage/TimelineSchemaCreator.java  |  10 ++
 .../storage/domain/DomainTableRW.java   |  92 ++
 .../storage/domain/package-info.java|  28 +++
 .../storage/domain/DomainColumn.java| 111 
 .../storage/domain/DomainColumnFamily.java  |  52 ++
 .../storage/domain/DomainRowKey.java| 179 +++
 .../storage/domain/DomainTable.java |  45 +
 .../storage/domain/package-info.java|  28 +++
 .../storage/common/TestRowKeys.java |  32 
 9 files changed, 577 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8597858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index e9e4770..37ed50c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -44,6 +44,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableR
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -357,6 +358,15 @@ public final class TimelineSchemaCreator {
   throw e;
 }
   }
+  try {
+new DomainTableRW().createTable(admin, hbaseConf);
+  } catch (IOException e) {
+if (skipExisting) {
+  LOG.warn("Skip and continue on: " + e.getMessage());
+} else {
+  throw e;
+}
+  }
 } finally {
   if (conn != null) {
 conn.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8597858/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java
new file mode 100644
index 000..1d58e40
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/domain/DomainTableRW.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * dist

[30/44] hadoop git commit: HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed by Dibyendu Karmakar.

2018-04-13 Thread xyao
HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed 
by Dibyendu Karmakar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/044341b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/044341b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/044341b4

Branch: refs/heads/HDFS-7240
Commit: 044341b4e4459b23159e94c7ff0601058398fd70
Parents: b5353c7
Author: Inigo Goiri 
Authored: Thu Apr 12 09:30:11 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 09:30:11 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 19 +-
 .../federation/router/TestRouterMountTable.java | 62 +++-
 2 files changed, 79 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/044341b4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index e6d2f5e..0dc2a69 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2328,7 +2328,24 @@ public class RouterRpcServer extends AbstractService
*/
   private Map getMountPointDates(String path) {
 Map ret = new TreeMap<>();
-// TODO add when we have a Mount Table
+if (subclusterResolver instanceof MountTableResolver) {
+  MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
+  String srcPath;
+  try {
+final List children = subclusterResolver.getMountPoints(path);
+for (String child : children) {
+  if (path.equals(Path.SEPARATOR)) {
+srcPath = Path.SEPARATOR + child;
+  } else {
+srcPath = path + Path.SEPARATOR + child;
+  }
+  MountTable entry = mountTable.getMountPoint(srcPath);
+  ret.put(child, entry.getDateModified());
+}
+  } catch (IOException e) {
+LOG.error("Cannot get mount point: {}", e.getMessage());
+  }
+}
 return ret;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/044341b4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index c9e28b1..b33b998 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -17,25 +17,33 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
 import

[35/44] hadoop git commit: YARN-8154. Fix missing titles in PlacementConstraints document. Contributed by Weiwei Yang.

2018-04-13 Thread xyao
YARN-8154. Fix missing titles in PlacementConstraints document. Contributed by 
Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/375654c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/375654c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/375654c3

Branch: refs/heads/HDFS-7240
Commit: 375654c36a8bfa4337c9011fcd86737462dfa61e
Parents: ec1e8c1
Author: Weiwei Yang 
Authored: Fri Apr 13 13:06:47 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Apr 13 13:06:47 2018 +0800

--
 .../src/site/markdown/PlacementConstraints.md.vm | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/375654c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
index 6af62e7..cb34c3f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
@@ -12,6 +12,9 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
+#set ( $H3 = '###' )
+#set ( $H4 = '####' )
+
 Placement Constraints
 =
 
@@ -35,7 +38,7 @@ Quick Guide
 
 We first describe how to enable scheduling with placement constraints and then 
provide examples of how to experiment with this feature using the distributed 
shell, an application that allows to run a given shell command on a set of 
containers.
 
-### Enabling placement constraints
+$H3 Enabling placement constraints
 
 To enable placement constraints, the following property has to be set to 
`placement-processor` or `scheduler` in **conf/yarn-site.xml**:
 
@@ -51,7 +54,7 @@ We now give more details about each of the three placement 
constraint handlers:
 
 The `placement-processor` handler supports a wider range of constraints and 
can allow more containers to be placed, especially when applications have 
demanding constraints or the cluster is highly-utilized (due to considering 
multiple containers at a time). However, if respecting task priority within an 
application is important for the user and the capacity scheduler is used, then 
the `scheduler` handler should be used instead.
 
-### Experimenting with placement constraints using distributed shell
+$H3 Experimenting with placement constraints using distributed shell
 
 Users can experiment with placement constraints by using the distributed shell 
application through the following command:
 
@@ -89,18 +92,18 @@ The above encodes two constraints:
 Defining Placement Constraints
 --
 
-### Allocation tags
+$H3 Allocation tags
 
 Allocation tags are string tags that an application can associate with (groups 
of) its containers. Tags are used to identify components of applications. For 
example, an HBase Master allocation can be tagged with "hbase-m", and Region 
Servers with "hbase-rs". Other examples are "latency-critical" to refer to the 
more general demands of the allocation, or "app_0041" to denote the job ID. 
Allocation tags play a key role in constraints, as they allow to refer to 
multiple allocations that share a common tag.
 
 Note that instead of using the `ResourceRequest` object to define allocation 
tags, we use the new `SchedulingRequest` object. This has many similarities 
with the `ResourceRequest`, but better separates the sizing of the requested 
allocations (number and size of allocations, priority, execution type, etc.), 
and the constraints dictating how these allocations should be placed (resource 
name, relaxed locality). Applications can still use `ResourceRequest` objects, 
but in order to define allocation tags and constraints, they need to use the 
`SchedulingRequest` object. Within a single `AllocateRequest`, an application 
should use either the `ResourceRequest` or the `SchedulingRequest` objects, but 
not both of them.
 
-#### Differences between node labels, node attributes and allocation tags
+$H4 Differences between node labels, node attributes and allocation tags
 
 The difference between allocation tags and node labels or node attributes 
(YARN-3409), is that allocation tags are attached to allocations and not to 
nodes. When an allocation gets allocated to a node by the scheduler, the set of 
tags of that allocation are automatically added to the node for the duration of 
the allocation. Hence, a node inherits the tags of the allocations that are 
currently allocated to the node. Likew
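
To ground the allocation-tag discussion, a hedged sketch using the placement
constraint DSL that ships with this feature (class and method names per the
PlacementConstraints API; the SchedulingRequest wiring is omitted):

    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

    // Anti-affinity: place no two "hbase-rs" containers on the same node.
    PlacementConstraint antiAffinity =
        targetNotIn(NODE, allocationTag("hbase-rs")).build();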

[12/44] hadoop git commit: HADOOP-15346 S3ARetryPolicy for 400/BadArgument to be "fail". Contributed by Steve Loughran.

2018-04-13 Thread xyao
HADOOP-15346 S3ARetryPolicy for 400/BadArgument to be "fail". Contributed by 
Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0aff8a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0aff8a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0aff8a9

Branch: refs/heads/HDFS-7240
Commit: b0aff8a96221fbf40d1eafe51b4f530b73146a20
Parents: 7c9cdad
Author: Aaron Fabbri 
Authored: Tue Apr 10 23:55:38 2018 -0700
Committer: Aaron Fabbri 
Committed: Tue Apr 10 23:55:38 2018 -0700

--
 .../java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java |  6 +++---
 .../java/org/apache/hadoop/fs/s3a/TestInvoker.java| 14 --
 2 files changed, 7 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0aff8a9/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
index d857330..2b361fd 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ARetryPolicy.java
@@ -175,9 +175,9 @@ public class S3ARetryPolicy implements RetryPolicy {
 // which isn't going to be recovered from
 policyMap.put(EOFException.class, retryIdempotentCalls);
 
-// policy on a 400/bad request still ambiguous. Given it
-// comes and goes on test runs: try again
-policyMap.put(AWSBadRequestException.class, connectivityFailure);
+// policy on a 400/bad request still ambiguous.
+// Treated as an immediate failure
+policyMap.put(AWSBadRequestException.class, fail);
 
 // Status 500 error code is also treated as a connectivity problem
 policyMap.put(AWSStatus500Exception.class, connectivityFailure);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0aff8a9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java
index d29e2df..5da665c 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java
@@ -283,18 +283,12 @@ public class TestInvoker extends Assert {
   /**
* Repeatedly retry until eventually a bad request succeeds.
*/
-  @Test
-  public void testRetryBadRequestIdempotent() throws Throwable {
-final AtomicInteger counter = new AtomicInteger(0);
-final int attemptsBeforeSuccess = ACTIVE_RETRY_LIMIT;
-invoker.retry("test", null, true,
+  @Test(expected = AWSBadRequestException.class)
+  public void testRetryBadRequestNotIdempotent() throws Throwable {
+invoker.retry("test", null, false,
 () -> {
-  if (counter.incrementAndGet() < attemptsBeforeSuccess) {
-throw BAD_REQUEST;
-  }
+  throw BAD_REQUEST;
 });
-assertEquals(attemptsBeforeSuccess, counter.get());
-assertEquals("retry count ", attemptsBeforeSuccess - 1, retryCount);
   }
 
   @Test
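
The behavioral contract after this change, as a hedged restatement using the
Invoker.retry signature from the test above: a 400/BadRequest is
deterministic, so retrying cannot help, and the exception now surfaces on the
first attempt regardless of the idempotency flag.

    // Throws AWSBadRequestException immediately instead of burning the
    // whole retry budget first.
    invoker.retry("op", null, true /* idempotent */, () -> {
      throw BAD_REQUEST;
    });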


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/44] hadoop git commit: Revert "HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed by Dibyendu Karmakar."

2018-04-13 Thread xyao
Revert "HDFS-13386. RBF: Wrong date information in list file(-ls) result. 
Contributed by Dibyendu Karmakar."

This reverts commit 18de6f2042b70f9f0d7a2620c60de022768a7b13.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d272056f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d272056f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d272056f

Branch: refs/heads/HDFS-7240
Commit: d272056fcb23f6a9252b19d349acd718d7837079
Parents: 7ed8511
Author: Inigo Goiri 
Authored: Thu Apr 12 08:42:19 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Apr 12 08:42:19 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 19 +-
 .../federation/router/TestRouterMountTable.java | 62 +---
 2 files changed, 2 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d272056f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 0dc2a69..e6d2f5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2328,24 +2328,7 @@ public class RouterRpcServer extends AbstractService
*/
   private Map getMountPointDates(String path) {
 Map ret = new TreeMap<>();
-if (subclusterResolver instanceof MountTableResolver) {
-  MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
-  String srcPath;
-  try {
-final List children = subclusterResolver.getMountPoints(path);
-for (String child : children) {
-  if (path.equals(Path.SEPARATOR)) {
-srcPath = Path.SEPARATOR + child;
-  } else {
-srcPath = path + Path.SEPARATOR + child;
-  }
-  MountTable entry = mountTable.getMountPoint(srcPath);
-  ret.put(child, entry.getDateModified());
-}
-  } catch (IOException e) {
-LOG.error("Cannot get mount point: {}", e.getMessage());
-  }
-}
+// TODO add when we have a Mount Table
 return ret;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d272056f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index b33b998..c9e28b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -17,33 +17,25 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federatio

[38/44] hadoop git commit: HDFS-13418. NetworkTopology should be configurable when enable DFSNetworkTopology. Contributed by Tao Jie.

2018-04-13 Thread xyao
HDFS-13418. NetworkTopology should be configurable when enable 
DFSNetworkTopology. Contributed by Tao Jie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0725953e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0725953e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0725953e

Branch: refs/heads/HDFS-7240
Commit: 0725953efec89b35b7586b846abb01f7c5963b37
Parents: 1a407bc
Author: Yiqun Lin 
Authored: Fri Apr 13 17:55:45 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Apr 13 17:55:45 2018 +0800

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  7 +++
 .../hadoop/hdfs/net/DFSNetworkTopology.java | 10 +++-
 .../src/main/resources/hdfs-default.xml | 14 ++
 .../blockmanagement/TestDatanodeManager.java| 52 
 4 files changed, 81 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0725953e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b4b9d97..b4dab4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
@@ -1177,6 +1178,12 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   "dfs.use.dfs.network.topology";
   public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
 
+  public static final String DFS_NET_TOPOLOGY_IMPL_KEY =
+  "dfs.net.topology.impl";
+
+  public static final Class DFS_NET_TOPOLOGY_IMPL_DEFAULT =
+  DFSNetworkTopology.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0725953e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index e74cdec..7889ef4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -22,11 +22,13 @@ import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
+import org.apache.hadoop.util.ReflectionUtils;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -44,8 +46,12 @@ public class DFSNetworkTopology extends NetworkTopology {
   private static final Random RANDOM = new Random();
 
   public static DFSNetworkTopology getInstance(Configuration conf) {
-DFSNetworkTopology nt = new DFSNetworkTopology();
-return (DFSNetworkTopology)nt.init(DFSTopologyNodeImpl.FACTORY);
+
+DFSNetworkTopology nt = ReflectionUtils.newInstance(conf.getClass(
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
+DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_DEFAULT,
+DFSNetworkTopology.class), conf);
+return (DFSNetworkTopology) nt.init(DFSTopologyNodeImpl.FACTORY);
   }
 
   /**

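A hedged usage sketch of the new hook. MyDFSTopology is a hypothetical
site-specific subclass; the key and default are the ones added to
DFSConfigKeys above:

  // hypothetical subclass standing in for a custom topology implementation
  public class MyDFSTopology extends DFSNetworkTopology { }

  Configuration conf = new HdfsConfiguration();
  conf.setClass(DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
      MyDFSTopology.class, DFSNetworkTopology.class);
  // getInstance() now instantiates the configured class reflectively
  // instead of hard-wiring DFSNetworkTopology
  DFSNetworkTopology topology = DFSNetworkTopology.getInstance(conf);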
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0725953e/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index f8cce60..4ed4690 100644
--- a/hadoop-h

[43/44] hadoop git commit: MAPREDUCE-7077. Pipe mapreduce job fails with Permission denied for jobTokenPassword. (Akira Ajisaka via wangda)

2018-04-13 Thread xyao
MAPREDUCE-7077. Pipe mapreduce job fails with Permission denied for 
jobTokenPassword. (Akira Ajisaka via wangda)

Change-Id: Ie8f01425d58409fa3661f768205b7616128c8aa4
(cherry picked from commit 035e0f97ea44b0495707949a781d8792dcf6ea6b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/995cba65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/995cba65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/995cba65

Branch: refs/heads/HDFS-7240
Commit: 995cba65fe29966583e36f9491d9a27b323918ae
Parents: 9031a76
Author: Wangda Tan 
Authored: Thu Apr 12 14:33:33 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 13 13:52:10 2018 -0700

--
 .../apache/hadoop/mapred/pipes/Application.java  |  5 ++---
 .../hadoop/mapred/pipes/TestPipeApplication.java | 19 ---
 2 files changed, 10 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/995cba65/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
index 5c8aab9..83d2509 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.TaskAttemptID;
 import org.apache.hadoop.mapred.TaskLog;
-import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
@@ -104,8 +103,8 @@ class Application

http://git-wip-us.apache.org/repos/asf/hadoop/blob/995cba65/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
index 13597e0..88d8f95 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.IFile.Writer;
-import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapred.Counters;
@@ -84,10 +83,10 @@ public class TestPipeApplication {
   public void testRunner() throws Exception {
 
 // clean old password files
-JobConf conf = new JobConf();
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 try {
   RecordReader rReader = new 
ReaderPipesMapRunner();
+  JobConf conf = new JobConf();
   conf.set(Submitter.IS_JAVA_RR, "true");
   // for stdour and stderror
 
@@ -163,7 +162,7 @@ public class TestPipeApplication {
 
 TestTaskReporter reporter = new TestTaskReporter();
 
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 try {
 
   conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
@@ -248,7 +247,7 @@ public class TestPipeApplication {
 
 JobConf conf = new JobConf();
 
-File[] psw = cleanTokenPasswordFile(conf);
+File[] psw = cleanTokenPasswordFile();
 
 System.setProperty("test.build.data",
 "target/tmp/build/TEST_SUBMITTER_MAPPER/data");
@@ -389,8 +388,8 @@ public class TestPipeApplication {
   @Test
   public void testPipesReduser() throws Exception {
 
+File[] psw = cleanTokenPasswordFile();
 JobConf conf = new JobConf();
-File[] psw = clean
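The recurring edit in this test class is mechanical: cleanTokenPasswordFile()
no longer takes a JobConf, so it is self-contained and can run before the
configuration is built. A hedged before/after sketch:

  // before: the helper needed the job configuration to locate the files
  JobConf conf = new JobConf();
  File[] psw = cleanTokenPasswordFile(conf);

  // after: the helper stands alone, so the ordering no longer matters
  File[] psw = cleanTokenPasswordFile();
  JobConf conf = new JobConf();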

[19/44] hadoop git commit: HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed by Dibyendu Karmakar.

2018-04-13 Thread xyao
HDFS-13386. RBF: Wrong date information in list file(-ls) result. Contributed 
by Dibyendu Karmakar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18de6f20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18de6f20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18de6f20

Branch: refs/heads/HDFS-7240
Commit: 18de6f2042b70f9f0d7a2620c60de022768a7b13
Parents: 933477e
Author: Inigo Goiri 
Authored: Wed Apr 11 12:03:14 2018 -0700
Committer: Inigo Goiri 
Committed: Wed Apr 11 12:03:14 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 19 +-
 .../federation/router/TestRouterMountTable.java | 62 +++-
 2 files changed, 79 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18de6f20/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index e6d2f5e..0dc2a69 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2328,7 +2328,24 @@ public class RouterRpcServer extends AbstractService
*/
   private Map getMountPointDates(String path) {
 Map ret = new TreeMap<>();
-// TODO add when we have a Mount Table
+if (subclusterResolver instanceof MountTableResolver) {
+  MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
+  String srcPath;
+  try {
+final List children = subclusterResolver.getMountPoints(path);
+for (String child : children) {
+  if (path.equals(Path.SEPARATOR)) {
+srcPath = Path.SEPARATOR + child;
+  } else {
+srcPath = path + Path.SEPARATOR + child;
+  }
+  MountTable entry = mountTable.getMountPoint(srcPath);
+  ret.put(child, entry.getDateModified());
+}
+  } catch (IOException e) {
+LOG.error("Cannot get mount point: {}", e.getMessage());
+  }
+}
 return ret;
   }
 
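A hedged walk-through of the restored path handling: the mount key is the
listed path joined with the child name, with a special case so the root does
not produce a double separator:

  // "/" + "data" -> "/data";  "/a" + "b" -> "/a/b"
  String srcPath = path.equals(Path.SEPARATOR)
      ? Path.SEPARATOR + child
      : path + Path.SEPARATOR + child;
  // the MountTable entry for srcPath supplies the mtime that -ls displays
  long mtime = mountTable.getMountPoint(srcPath).getDateModified();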

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18de6f20/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index c9e28b1..b33b998 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -17,25 +17,33 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
 import

[08/44] hadoop git commit: YARN-7941. Transitive dependencies for component are not resolved. Contributed by Billie Rinaldi.

2018-04-13 Thread xyao
YARN-7941. Transitive dependencies for component are not resolved. Contributed 
by Billie Rinaldi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0487110
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0487110
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0487110

Branch: refs/heads/HDFS-7240
Commit: c0487110990958fa985d273eb178bdf76002cf3a
Parents: d919eb6
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:18:50 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:18:50 2018 +0530

--
 .../hadoop/yarn/service/component/Component.java|  1 +
 .../hadoop/yarn/service/TestYarnNativeServices.java | 16 
 2 files changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0487110/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a2127c8..39897f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -179,6 +179,7 @@ public class Component implements 
EventHandler {
 maxContainerFailurePerComp = componentSpec.getConfiguration()
 .getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10);
 createNumCompInstances(component.getNumberOfContainers());
+setDesiredContainers(component.getNumberOfContainers().intValue());
   }
 
   private void createNumCompInstances(long count) {

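A hedged reading of the one-line fix: dependency readiness is judged against
a component's desired container count, and that count was never initialized
here, so a desired count of zero made every dependency look satisfied
immediately and let a transitive chain start out of order. Initializing it
alongside the instance list closes the gap:

  // constructor tail after the fix (names from the hunk above)
  createNumCompInstances(component.getNumberOfContainers());
  setDesiredContainers(component.getNumberOfContainers().intValue());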
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0487110/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 2b44701..5e267bb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -166,7 +166,9 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 
   // Create compa with 2 containers
   // Create compb with 2 containers which depends on compa
-  // Check containers for compa started before containers for compb
+  // Create compc with 2 containers which depends on compb
+  // Check containers for compa started before containers for compb before
+  // containers for compc
   @Test (timeout = 200000)
   public void testComponentStartOrder() throws Exception {
 setupInternal(NUM_NMS);
@@ -175,17 +177,23 @@ public class TestYarnNativeServices extends 
ServiceTestUtils {
 exampleApp.setName("teststartorder");
 exampleApp.setVersion("v1");
 exampleApp.addComponent(createComponent("compa", 2, "sleep 1000"));
-Component compb = createComponent("compb", 2, "sleep 1000");
 
-// Let compb depedends on compa;
+// Let compb depend on compa
+Component compb = createComponent("compb", 2, "sleep 1000");
 compb.setDependencies(Collections.singletonList("compa"));
 exampleApp.addComponent(compb);
 
+// Let compc depend on compb
+Component compc = createComponent("compc", 2, "sleep 1000");
+compc.setDependencies(Collections.singletonList("compb"));
+exampleApp.addComponent(compc);
+
 client.actionCreate(exampleApp);
 waitForServiceToBeStable(client, exampleApp);
 
 // check that containers for compa are launched before containers for compb
-checkContainerLaunchDependencies(client, exampleApp, "compa", "compb");
+

[09/44] hadoop git commit: MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 purpose. Contributed by Charan Hebri.

2018-04-13 Thread xyao
MAPREDUCE-7062. Update mapreduce.job.tags description for making use for ATSv2 
purpose. Contributed by Charan Hebri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cc59a09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cc59a09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cc59a09

Branch: refs/heads/HDFS-7240
Commit: 6cc59a09e7330dc893b386d84c8f2aa86c02eace
Parents: c048711
Author: Rohith Sharma K S 
Authored: Wed Apr 11 09:45:39 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Apr 11 09:45:39 2018 +0530

--
 .../src/main/resources/mapred-default.xml  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cc59a09/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index cf8be33..d47c0ff 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1045,6 +1045,12 @@
 
  Tags for the job that will be passed to YARN at submission
   time. Queries to YARN for applications can filter on these tags.
+  If these tags are intended to be used with The YARN Timeline Service v.2,
+  prefix them with the appropriate tag names for flow name, flow version 
and
+  flow run id. Example:
+  timeline_flow_name_tag:foo,
+  timeline_flow_version_tag:3df8b0d6100530080d2e0decf9e528e57c42a90a,
+  timeline_flow_run_id_tag:1465246348599
 
   
 
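A hedged client-side sketch of the convention the description now documents,
using the example values from the text above:

  Configuration conf = new Configuration();
  conf.set("mapreduce.job.tags",
      "timeline_flow_name_tag:foo,"
      + "timeline_flow_version_tag:3df8b0d6100530080d2e0decf9e528e57c42a90a,"
      + "timeline_flow_run_id_tag:1465246348599");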


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/44] hadoop git commit: HADOOP-15350. [JDK10] Update maven plugin tools to fix compile error in hadoop-maven-plugins module

2018-04-13 Thread xyao
HADOOP-15350. [JDK10] Update maven plugin tools to fix compile error in 
hadoop-maven-plugins module

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/832852ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/832852ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/832852ce

Branch: refs/heads/HDFS-7240
Commit: 832852ce4ff00d4aa698e89e1df39e5bf0df78b9
Parents: c7cd362
Author: Takanobu Asanuma 
Authored: Thu Apr 12 17:19:35 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Apr 12 17:19:35 2018 +0900

--
 hadoop-maven-plugins/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/832852ce/hadoop-maven-plugins/pom.xml
--
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index bd347d6..b31d158 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -27,7 +27,7 @@
   Apache Hadoop Maven Plugins
   
 3.0
-3.4
+3.5.1
   
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8156. Increase the default value of yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan Hebri.

2018-04-13 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 995cba65f -> 669eb7bde


YARN-8156. Increase the default value of 
yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan 
Hebri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/669eb7bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/669eb7bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/669eb7bd

Branch: refs/heads/trunk
Commit: 669eb7bdea34f26e9b9b8a2260ae4356791622e7
Parents: 995cba6
Author: Rohith Sharma K S 
Authored: Sat Apr 14 10:31:28 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Apr 14 10:31:28 2018 +0530

--
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java   | 2 +-
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 .../collector/TestPerNodeTimelineCollectorsAuxService.java | 2 ++
 .../hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md| 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/669eb7bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d2a71bc..8aa136d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2660,7 +2660,7 @@ public class YarnConfiguration extends Configuration {
   public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
   TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
 
-  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 1000;
+  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
+      60000;
 
   public static final String NUMBER_OF_ASYNC_ENTITIES_TO_MERGE =
   TIMELINE_SERVICE_PREFIX

http://git-wip-us.apache.org/repos/asf/hadoop/blob/669eb7bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index def0816..85915c2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2499,7 +2499,7 @@
 Time period till which the application collector will be alive
  in NM, after the  application master container finishes.
 yarn.timeline-service.app-collector.linger-period.ms
-1000
+60000
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/669eb7bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
index f27bf63..04b89d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
@@ -66,6 +66,8 @@ public class TestPerNodeTimelineCollectorsAuxService {
 conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
 conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
 FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
+1000L);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/669eb7bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-s
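The test hunk above already shows the knob in use; a hedged sketch of pinning
the period from client or test code (the default moves from 1 s to 60 s):

  YarnConfiguration conf = new YarnConfiguration();
  // keep the old one-second linger so test teardown stays fast
  conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS, 1000L);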

hadoop git commit: YARN-8156. Increase the default value of yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan Hebri.

2018-04-13 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 ca8bb322b -> eb1026dff


YARN-8156. Increase the default value of 
yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan 
Hebri.

(cherry picked from commit 669eb7bdea34f26e9b9b8a2260ae4356791622e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb1026df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb1026df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb1026df

Branch: refs/heads/branch-3.1
Commit: eb1026dffaa04d17148f584a10970e97ca6d5916
Parents: ca8bb32
Author: Rohith Sharma K S 
Authored: Sat Apr 14 10:31:28 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Apr 14 10:57:02 2018 +0530

--
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java   | 2 +-
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 .../collector/TestPerNodeTimelineCollectorsAuxService.java | 2 ++
 .../hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md| 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb1026df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 58c288b..208f85c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2630,7 +2630,7 @@ public class YarnConfiguration extends Configuration {
   public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
   TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
 
-  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 1000;
+  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
+      60000;
 
   public static final String NUMBER_OF_ASYNC_ENTITIES_TO_MERGE =
   TIMELINE_SERVICE_PREFIX

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb1026df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 114ba4b..58f32bf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2491,7 +2491,7 @@
 Time period till which the application collector will be alive
  in NM, after the  application master container finishes.
 yarn.timeline-service.app-collector.linger-period.ms
-1000
+60000
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb1026df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
index f27bf63..04b89d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
@@ -66,6 +66,8 @@ public class TestPerNodeTimelineCollectorsAuxService {
 conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
 conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
 FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
+1000L);
   }
 
   @After

http://git-wip-us.apache.

hadoop git commit: YARN-8156. Increase the default value of yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan Hebri.

2018-04-13 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 dc01e323e -> 7e3797947


YARN-8156. Increase the default value of 
yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan 
Hebri.

(cherry picked from commit 669eb7bdea34f26e9b9b8a2260ae4356791622e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e379794
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e379794
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e379794

Branch: refs/heads/branch-3.0
Commit: 7e37979472583710400679660ee5546339e88af8
Parents: dc01e32
Author: Rohith Sharma K S 
Authored: Sat Apr 14 10:31:28 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Apr 14 11:10:16 2018 +0530

--
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java   | 2 +-
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 .../collector/TestPerNodeTimelineCollectorsAuxService.java | 2 ++
 .../hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md| 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e379794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8853ae6..b4f2414 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2392,7 +2392,7 @@ public class YarnConfiguration extends Configuration {
   public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
   TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
 
-  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 1000;
+  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
+      60000;
 
   public static final String NUMBER_OF_ASYNC_ENTITIES_TO_MERGE =
   TIMELINE_SERVICE_PREFIX

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e379794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index f3a9b09..6d69a10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2377,7 +2377,7 @@
 Time period till which the application collector will be alive
  in NM, after the  application master container finishes.
 yarn.timeline-service.app-collector.linger-period.ms
-1000
+60000
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e379794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
index cb9ced0..f96466f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
@@ -64,6 +64,8 @@ public class TestPerNodeTimelineCollectorsAuxService {
 conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
 conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
 FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
+1000L);
   }
 
   @After

http://git-wip-us.apache.

hadoop git commit: YARN-8156. Increase the default value of yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan Hebri.

2018-04-13 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cc2a2a8e0 -> 900e6b435


YARN-8156. Increase the default value of 
yarn.timeline-service.app-collector.linger-period.ms. Contributed by Charan 
Hebri.

(cherry picked from commit 669eb7bdea34f26e9b9b8a2260ae4356791622e7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/900e6b43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/900e6b43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/900e6b43

Branch: refs/heads/branch-2
Commit: 900e6b435a32bb0726275faf86bb5b7382a5572e
Parents: cc2a2a8
Author: Rohith Sharma K S 
Authored: Sat Apr 14 10:31:28 2018 +0530
Committer: Rohith Sharma K S 
Committed: Sat Apr 14 11:15:30 2018 +0530

--
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java   | 2 +-
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 .../collector/TestPerNodeTimelineCollectorsAuxService.java | 2 ++
 .../hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md.vm | 2 +-
 4 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/900e6b43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b058e83..3f0c735 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2338,7 +2338,7 @@ public class YarnConfiguration extends Configuration {
   public static final String ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
   TIMELINE_SERVICE_PREFIX + "app-collector.linger-period.ms";
 
-  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS = 1000;
+  public static final int DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS =
+      60000;
 
   public static final String NUMBER_OF_ASYNC_ENTITIES_TO_MERGE =
   TIMELINE_SERVICE_PREFIX

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900e6b43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 24279f5..3e5e5ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2380,7 +2380,7 @@
 Time period till which the application collector will be alive
  in NM, after the  application master container finishes.
 yarn.timeline-service.app-collector.linger-period.ms
-1000
+60000
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/900e6b43/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
index 0320739..9d2bb24 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestPerNodeTimelineCollectorsAuxService.java
@@ -70,6 +70,8 @@ public class TestPerNodeTimelineCollectorsAuxService {
 conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
 conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
 FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+conf.setLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,
+1000L);
   }
 
   @After

http://git-wip-us.apache.org/