[5/5] hadoop git commit: YARN-8370. Some Node Manager tests fail on Windows due to improper path/file separator. Contributed by Anbang Hu.

2018-06-11 Thread inigoiri
YARN-8370. Some Node Manager tests fail on Windows due to improper path/file 
separator. Contributed by Anbang Hu.

(cherry picked from commit 2b2f672022547e8c19658213ac5a4090bf5b6c72)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5f3a050
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5f3a050
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5f3a050

Branch: refs/heads/branch-2.9
Commit: d5f3a050fd8cc9df1dc32dd18c628da71be8c294
Parents: 8814154
Author: Inigo Goiri 
Authored: Mon Jun 11 19:25:56 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 19:27:54 2018 -0700

--
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java | 4 ++--
 .../logaggregation/TestAppLogAggregatorImpl.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f3a050/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index e704c8f..4183fbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -150,9 +150,9 @@ public class TestLocalDirsHandlerService {
     Assert
         .assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
 
-    Assert.assertEquals(localDir2,
+    Assert.assertEquals(new Path(localDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
-    Assert.assertEquals(logDir2,
+    Assert.assertEquals(new Path(logDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
     Assert.assertEquals(localDir1 + "," + localDir2,
         dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));
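Why wrapping the expected value in Path fixes Windows: org.apache.hadoop.fs.Path normalizes the platform file separator, so the expected string ends up in the same form that LocalDirsHandlerService stores in the configuration. A minimal standalone sketch, not part of the commit; the directory name is made up:

import org.apache.hadoop.fs.Path;

public class PathSeparatorDemo {
  public static void main(String[] args) {
    // Raw Windows-style path, as a test might build it with File.separator.
    String localDir2 = "C:\\nmDir\\local2";
    // Path normalizes backslashes to forward slashes, so this prints
    // C:/nmDir/local2 on Windows and leaves Unix-style paths untouched.
    System.out.println(new Path(localDir2).toString());
  }
}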

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f3a050/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index 37ffd00..269dbab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -316,7 +316,7 @@ public class TestAppLogAggregatorImpl {
     for(int i = 0; i < tasks.length; i++) {
       FileDeletionTask task = (FileDeletionTask) tasks[i];
       for (Path path: task.getBaseDirs()) {
-        paths.add(path.toUri().getRawPath());
+        paths.add(new File(path.toUri().getRawPath()).getAbsolutePath());
       }
     }
     verifyFilesToDelete(expectedPathsForDeletion, paths);
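The second fix is the mirror image: Path#toUri().getRawPath() always yields forward slashes (e.g. /C:/nmDir/logs on Windows), while the expected paths in this test are built with java.io.File and use the platform separator. Round-tripping through File normalizes both sides. A hedged sketch, with a made-up URI:

import java.io.File;
import java.net.URI;

public class RawPathDemo {
  public static void main(String[] args) throws Exception {
    URI uri = new URI("file:/C:/nmDir/logs"); // hypothetical log dir
    String raw = uri.getRawPath();            // "/C:/nmDir/logs"
    // On Windows this resolves to the platform form, e.g. C:\nmDir\logs;
    // on Unix it is effectively a no-op for POSIX-style paths.
    System.out.println(new File(raw).getAbsolutePath());
  }
}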





[1/5] hadoop git commit: YARN-8370. Some Node Manager tests fail on Windows due to improper path/file separator. Contributed by Anbang Hu.

2018-06-11 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0b5d0c374 -> 8be1640bf
  refs/heads/branch-2.9 88141548d -> d5f3a050f
  refs/heads/branch-3.0 df338f2e1 -> 26ed14576
  refs/heads/branch-3.1 baac7c2b2 -> 65d2554ad
  refs/heads/trunk 23bfd9f7e -> 2b2f67202


YARN-8370. Some Node Manager tests fail on Windows due to improper path/file 
separator. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b2f6720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b2f6720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b2f6720

Branch: refs/heads/trunk
Commit: 2b2f672022547e8c19658213ac5a4090bf5b6c72
Parents: 23bfd9f
Author: Inigo Goiri 
Authored: Mon Jun 11 19:25:56 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 19:25:56 2018 -0700

--
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java | 4 ++--
 .../logaggregation/TestAppLogAggregatorImpl.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2f6720/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index e704c8f..4183fbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -150,9 +150,9 @@ public class TestLocalDirsHandlerService {
     Assert
         .assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
 
-    Assert.assertEquals(localDir2,
+    Assert.assertEquals(new Path(localDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
-    Assert.assertEquals(logDir2,
+    Assert.assertEquals(new Path(logDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
     Assert.assertEquals(localDir1 + "," + localDir2,
         dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2f6720/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index 95f4c32..b74eabc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -324,7 +324,7 @@ public class TestAppLogAggregatorImpl {
     for(int i = 0; i < tasks.length; i++) {
       FileDeletionTask task = (FileDeletionTask) tasks[i];
       for (Path path: task.getBaseDirs()) {
-        paths.add(path.toUri().getRawPath());
+        paths.add(new File(path.toUri().getRawPath()).getAbsolutePath());
       }
     }
     verifyFilesToDelete(expectedPathsForDeletion, paths);





[4/5] hadoop git commit: YARN-8370. Some Node Manager tests fail on Windows due to improper path/file separator. Contributed by Anbang Hu.

2018-06-11 Thread inigoiri
YARN-8370. Some Node Manager tests fail on Windows due to improper path/file 
separator. Contributed by Anbang Hu.

(cherry picked from commit 2b2f672022547e8c19658213ac5a4090bf5b6c72)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8be1640b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8be1640b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8be1640b

Branch: refs/heads/branch-2
Commit: 8be1640bf5ddea7ba3bed29651ae155a9514
Parents: 0b5d0c3
Author: Inigo Goiri 
Authored: Mon Jun 11 19:25:56 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 19:27:34 2018 -0700

--
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java | 4 ++--
 .../logaggregation/TestAppLogAggregatorImpl.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8be1640b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index e704c8f..4183fbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -150,9 +150,9 @@ public class TestLocalDirsHandlerService {
     Assert
         .assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
 
-    Assert.assertEquals(localDir2,
+    Assert.assertEquals(new Path(localDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
-    Assert.assertEquals(logDir2,
+    Assert.assertEquals(new Path(logDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
     Assert.assertEquals(localDir1 + "," + localDir2,
         dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8be1640b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index 37ffd00..269dbab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -316,7 +316,7 @@ public class TestAppLogAggregatorImpl {
     for(int i = 0; i < tasks.length; i++) {
       FileDeletionTask task = (FileDeletionTask) tasks[i];
       for (Path path: task.getBaseDirs()) {
-        paths.add(path.toUri().getRawPath());
+        paths.add(new File(path.toUri().getRawPath()).getAbsolutePath());
       }
     }
     verifyFilesToDelete(expectedPathsForDeletion, paths);





[3/5] hadoop git commit: YARN-8370. Some Node Manager tests fail on Windows due to improper path/file separator. Contributed by Anbang Hu.

2018-06-11 Thread inigoiri
YARN-8370. Some Node Manager tests fail on Windows due to improper path/file 
separator. Contributed by Anbang Hu.

(cherry picked from commit 2b2f672022547e8c19658213ac5a4090bf5b6c72)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26ed1457
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26ed1457
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26ed1457

Branch: refs/heads/branch-3.0
Commit: 26ed14576307fa396e76a97ae1d8b6b7c4d2a7be
Parents: df338f2
Author: Inigo Goiri 
Authored: Mon Jun 11 19:25:56 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 19:26:58 2018 -0700

--
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java | 4 ++--
 .../logaggregation/TestAppLogAggregatorImpl.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26ed1457/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index e704c8f..4183fbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -150,9 +150,9 @@ public class TestLocalDirsHandlerService {
     Assert
         .assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
 
-    Assert.assertEquals(localDir2,
+    Assert.assertEquals(new Path(localDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
-    Assert.assertEquals(logDir2,
+    Assert.assertEquals(new Path(logDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
     Assert.assertEquals(localDir1 + "," + localDir2,
         dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26ed1457/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index e13c805..b189e8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -316,7 +316,7 @@ public class TestAppLogAggregatorImpl {
     for(int i = 0; i < tasks.length; i++) {
       FileDeletionTask task = (FileDeletionTask) tasks[i];
       for (Path path: task.getBaseDirs()) {
-        paths.add(path.toUri().getRawPath());
+        paths.add(new File(path.toUri().getRawPath()).getAbsolutePath());
      }
    }
      verifyFilesToDelete(expectedPathsForDeletion, paths);





[2/5] hadoop git commit: YARN-8370. Some Node Manager tests fail on Windows due to improper path/file separator. Contributed by Anbang Hu.

2018-06-11 Thread inigoiri
YARN-8370. Some Node Manager tests fail on Windows due to improper path/file 
separator. Contributed by Anbang Hu.

(cherry picked from commit 2b2f672022547e8c19658213ac5a4090bf5b6c72)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65d2554a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65d2554a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65d2554a

Branch: refs/heads/branch-3.1
Commit: 65d2554ad8f5e275ace702309e125685226936a0
Parents: baac7c2
Author: Inigo Goiri 
Authored: Mon Jun 11 19:25:56 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 19:26:27 2018 -0700

--
 .../yarn/server/nodemanager/TestLocalDirsHandlerService.java | 4 ++--
 .../logaggregation/TestAppLogAggregatorImpl.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65d2554a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index e704c8f..4183fbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -150,9 +150,9 @@ public class TestLocalDirsHandlerService {
     Assert
         .assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
 
-    Assert.assertEquals(localDir2,
+    Assert.assertEquals(new Path(localDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
-    Assert.assertEquals(logDir2,
+    Assert.assertEquals(new Path(logDir2).toString(),
         dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
     Assert.assertEquals(localDir1 + "," + localDir2,
         dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65d2554a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index e13c805..b189e8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -316,7 +316,7 @@ public class TestAppLogAggregatorImpl {
     for(int i = 0; i < tasks.length; i++) {
       FileDeletionTask task = (FileDeletionTask) tasks[i];
       for (Path path: task.getBaseDirs()) {
-        paths.add(path.toUri().getRawPath());
+        paths.add(new File(path.toUri().getRawPath()).getAbsolutePath());
       }
     }
     verifyFilesToDelete(expectedPathsForDeletion, paths);





hadoop git commit: HDDS-72. Add deleteTransactionId field in ContainerInfo. Contributed by Lokesh Jain.

2018-06-11 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7c3dc3908 -> 23bfd9f7e


HDDS-72. Add deleteTransactionId field in ContainerInfo. Contributed by Lokesh 
Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23bfd9f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23bfd9f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23bfd9f7

Branch: refs/heads/trunk
Commit: 23bfd9f7e4ef672613ec59c83d4b47b051949cd1
Parents: 7c3dc39
Author: Xiaoyu Yao 
Authored: Mon Jun 11 16:02:32 2018 -0700
Committer: Xiaoyu Yao 
Committed: Mon Jun 11 16:02:32 2018 -0700

--
 .../container/common/helpers/ContainerInfo.java |  27 -
 .../org/apache/hadoop/ozone/OzoneConsts.java|   2 +
 .../apache/hadoop/utils/MetadataKeyFilters.java | 118 +++
 hadoop-hdds/common/src/main/proto/hdds.proto|   1 +
 .../apache/hadoop/ozone/TestMetadataStore.java  |  61 +-
 .../container/common/helpers/ContainerData.java |  21 
 .../common/helpers/ContainerReport.java |  12 ++
 .../common/impl/ContainerManagerImpl.java   |  15 ++-
 .../background/BlockDeletingService.java|  16 ++-
 .../DeleteBlocksCommandHandler.java |   3 +
 .../StorageContainerDatanodeProtocol.proto  |   1 +
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   9 +-
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |   3 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java |   8 +-
 .../hdds/scm/container/ContainerMapping.java|  35 ++
 .../scm/container/ContainerStateManager.java|  12 ++
 .../hadoop/hdds/scm/container/Mapping.java  |  11 ++
 .../hadoop/hdds/scm/block/TestBlockManager.java |  16 +++
 .../scm/container/TestContainerMapping.java |   6 +-
 .../container/closer/TestContainerCloser.java   |   3 +-
 .../TestStorageContainerManagerHelper.java  |   6 +-
 .../common/TestBlockDeletingService.java|  29 -
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   |   3 +-
 .../ozone/ksm/KSMMetadataManagerImpl.java   |   9 +-
 .../genesis/BenchMarkContainerStateMap.java |  44 +--
 25 files changed, 401 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23bfd9f7/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 10fd96c..2c38d45 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.util.Time;
 import java.io.IOException;
 import java.util.Comparator;
 
+import static java.lang.Math.max;
+
 /**
  * Class wraps ozone container info.
  */
@@ -60,6 +62,7 @@ public class ContainerInfo
   private long stateEnterTime;
   private String owner;
   private long containerID;
+  private long deleteTransactionId;
   ContainerInfo(
       long containerID,
       HddsProtos.LifeCycleState state,
@@ -68,7 +71,8 @@ public class ContainerInfo
       long usedBytes,
       long numberOfKeys,
       long stateEnterTime,
-      String owner) {
+      String owner,
+      long deleteTransactionId) {
     this.containerID = containerID;
     this.pipeline = pipeline;
     this.allocatedBytes = allocatedBytes;
@@ -78,6 +82,7 @@ public class ContainerInfo
     this.state = state;
     this.stateEnterTime = stateEnterTime;
     this.owner = owner;
+    this.deleteTransactionId = deleteTransactionId;
   }
 
   /**
@@ -96,6 +101,7 @@ public class ContainerInfo
     builder.setStateEnterTime(info.getStateEnterTime());
     builder.setOwner(info.getOwner());
     builder.setContainerID(info.getContainerID());
+    builder.setDeleteTransactionId(info.getDeleteTransactionId());
     return builder.build();
   }
 
@@ -141,6 +147,14 @@ public class ContainerInfo
     return numberOfKeys;
   }
 
+  public long getDeleteTransactionId() {
+    return deleteTransactionId;
+  }
+
+  public void updateDeleteTransactionId(long transactionId) {
+    deleteTransactionId = max(transactionId, deleteTransactionId);
+  }
+
   public ContainerID containerID() {
     return new ContainerID(getContainerID());
   }
@@ -174,6 +188,7 @@ public class ContainerInfo
     builder.setState(state);
     builder.setStateEnterTime(stateEnterTime);
     builder.setContainerID(getContainerID());
+    builder.setDeleteTransactionId(deleteTransactionId);
 
     if (getOwner() != null) {
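The detail worth noting above is updateDeleteTransactionId: it folds in max(), so a replayed or out-of-order delete transaction can never move the recorded id backwards. The same pattern in isolation (an illustrative class, not the HDDS API):

import static java.lang.Math.max;

class TransactionIdTracker {
  private long deleteTransactionId;

  // Monotonic update: stale ids are ignored rather than overwriting.
  void update(long transactionId) {
    deleteTransactionId = max(transactionId, deleteTransactionId);
  }

  long get() {
    return deleteTransactionId;
  }

  public static void main(String[] args) {
    TransactionIdTracker t = new TransactionIdTracker();
    t.update(7);
    t.update(3);                 // older transaction replayed
    System.out.println(t.get()); // still 7
  }
}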

[2/3] hadoop git commit: HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks. Contributed by Esfandiar Manii.

2018-06-11 Thread cdouglas
HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update 
corresponding code blocks.
Contributed by Esfandiar Manii.

(cherry picked from commit d901be679554eb6b323f3bc6e8de267d85dd2e06)
(cherry picked from commit baac7c2b285454d71d0371505fb7a3403a548176)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df338f2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df338f2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df338f2e

Branch: refs/heads/branch-3.0
Commit: df338f2e1a19ed9c0c5c13f7d4aad08f9836de9f
Parents: 8202c33
Author: Steve Loughran 
Authored: Wed Jun 6 18:28:14 2018 +0100
Committer: Chris Douglas 
Committed: Mon Jun 11 15:33:36 2018 -0700

--
 hadoop-project/pom.xml  | 2 +-
 .../apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java | 4 +++-
 .../org/apache/hadoop/fs/azure/ITestContainerChecks.java| 9 ++---
 3 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df338f2e/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 92a158a..5c2edf9 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1161,7 +1161,7 @@
       <dependency>
         <groupId>com.microsoft.azure</groupId>
         <artifactId>azure-storage</artifactId>
-        <version>5.4.0</version>
+        <version>7.0.0</version>
       </dependency>
 
       <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df338f2e/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 754f343..e4ad70c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -87,8 +87,10 @@ final class NativeAzureFileSystemHelper {
     if (errorCode != null
         && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
             || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
+            || errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND)
             || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
-            || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) {
+            || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString())
+            || errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()))) {
 
       return true;
     }
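The change adds ContainerNotFound to the set of error codes treated as "file not found": the 7.0.0 SDK surfaces a missing container under that code, which the old check missed. A reduced, self-contained sketch of the same classification; the string literals stand in for the SDK's StorageErrorCodeStrings constants and are assumptions for illustration:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class NotFoundClassifierDemo {
  private static final Set<String> NOT_FOUND_CODES = new HashSet<>(
      Arrays.asList("BlobNotFound", "ResourceNotFound", "ContainerNotFound"));

  // Mirrors the shape of the helper's check: a null-safe membership test.
  static boolean isNotFound(String errorCode) {
    return errorCode != null && NOT_FOUND_CODES.contains(errorCode);
  }

  public static void main(String[] args) {
    System.out.println(isNotFound("ContainerNotFound")); // true after the patch
    System.out.println(isNotFound(null));                // false
  }
}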

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df338f2e/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
--
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
index cc3baf5..456e4b1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -75,7 +75,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -115,7 +115,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -143,7 +143,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -165,6 +165,9 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
     assertFalse(fs.rename(foo, bar));
     assertFalse(container.exists());
 
+    // Create a container outside 
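The test-side change tracks a wording change in the SDK: a deleted container now produces a message containing "is not found" rather than "does not exist.". A generic JUnit 4 sketch of the assert-on-message pattern these hunks use; the throwing method is a hypothetical stand-in, not the WASB client:

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.FileNotFoundException;
import org.junit.Test;

public class NotFoundMessageTest {
  // Hypothetical stand-in for a WASB read against a deleted container.
  private void readFromMissingContainer() throws FileNotFoundException {
    throw new FileNotFoundException("The specified container is not found.");
  }

  @Test
  public void testMissingContainerMessage() throws Exception {
    try {
      readFromMissingContainer();
      fail("Should've thrown.");
    } catch (FileNotFoundException ex) {
      // Same wording check as ITestContainerChecks after the patch.
      assertTrue("Unexpected exception: " + ex,
          ex.getMessage().contains("is not found"));
    }
  }
}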

[1/3] hadoop git commit: HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks. Contributed by Esfandiar Manii.

2018-06-11 Thread cdouglas
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 805939079 -> 0b5d0c374
  refs/heads/branch-3.0 8202c334f -> df338f2e1
  refs/heads/branch-3.1 425fe4e21 -> baac7c2b2


HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update 
corresponding code blocks.
Contributed by Esfandiar Manii.

(cherry picked from commit d901be679554eb6b323f3bc6e8de267d85dd2e06)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/baac7c2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/baac7c2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/baac7c2b

Branch: refs/heads/branch-3.1
Commit: baac7c2b285454d71d0371505fb7a3403a548176
Parents: 425fe4e
Author: Steve Loughran 
Authored: Wed Jun 6 18:28:14 2018 +0100
Committer: Chris Douglas 
Committed: Mon Jun 11 15:33:03 2018 -0700

--
 hadoop-project/pom.xml  | 2 +-
 .../apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java | 4 +++-
 .../org/apache/hadoop/fs/azure/ITestContainerChecks.java| 9 ++---
 3 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac7c2b/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 55a78e9..e674a82 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1182,7 +1182,7 @@
       <dependency>
         <groupId>com.microsoft.azure</groupId>
         <artifactId>azure-storage</artifactId>
-        <version>5.4.0</version>
+        <version>7.0.0</version>
       </dependency>
 
       <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac7c2b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 754f343..e4ad70c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -87,8 +87,10 @@ final class NativeAzureFileSystemHelper {
     if (errorCode != null
         && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
             || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
+            || errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND)
             || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
-            || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) {
+            || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString())
+            || errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()))) {
 
       return true;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/baac7c2b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
--
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
index cc3baf5..456e4b1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -75,7 +75,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -115,7 +115,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -143,7 +143,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -165,6 +165,9 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
 

[3/3] hadoop git commit: HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update corresponding code blocks. Contributed by Esfandiar Manii.

2018-06-11 Thread cdouglas
HADOOP-15506. Upgrade Azure Storage Sdk version to 7.0.0 and update 
corresponding code blocks.
Contributed by Esfandiar Manii.

(cherry picked from commit d901be679554eb6b323f3bc6e8de267d85dd2e06)
(cherry picked from commit baac7c2b285454d71d0371505fb7a3403a548176)
(cherry picked from commit df338f2e1a19ed9c0c5c13f7d4aad08f9836de9f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b5d0c37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b5d0c37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b5d0c37

Branch: refs/heads/branch-2
Commit: 0b5d0c3740c96a5ce439eadbd66e44aa017cdf30
Parents: 8059390
Author: Steve Loughran 
Authored: Wed Jun 6 18:28:14 2018 +0100
Committer: Chris Douglas 
Committed: Mon Jun 11 15:34:54 2018 -0700

--
 hadoop-project/pom.xml  | 6 ++
 .../apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java | 4 +++-
 .../org/apache/hadoop/fs/azure/ITestContainerChecks.java| 9 ++---
 3 files changed, 15 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b5d0c37/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8812132..f1ab70d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1101,6 +1101,12 @@
       </dependency>
 
       <dependency>
+        <groupId>com.microsoft.azure</groupId>
+        <artifactId>azure-storage</artifactId>
+        <version>7.0.0</version>
+      </dependency>
+
+      <dependency>
         <groupId>com.aliyun.oss</groupId>
         <artifactId>aliyun-sdk-oss</artifactId>
         <version>2.8.3</version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b5d0c37/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 754f343..e4ad70c 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -87,8 +87,10 @@ final class NativeAzureFileSystemHelper {
     if (errorCode != null
         && (errorCode.equals(StorageErrorCodeStrings.BLOB_NOT_FOUND)
             || errorCode.equals(StorageErrorCodeStrings.RESOURCE_NOT_FOUND)
+            || errorCode.equals(StorageErrorCodeStrings.CONTAINER_NOT_FOUND)
             || errorCode.equals(StorageErrorCode.BLOB_NOT_FOUND.toString())
-            || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString()))) {
+            || errorCode.equals(StorageErrorCode.RESOURCE_NOT_FOUND.toString())
+            || errorCode.equals(StorageErrorCode.CONTAINER_NOT_FOUND.toString()))) {
 
       return true;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b5d0c37/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
--
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
index 417ee0e..cb8e0c9 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java
@@ -75,7 +75,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -115,7 +115,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -143,7 +143,7 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {
       assertTrue("Should've thrown.", false);
     } catch (FileNotFoundException ex) {
       assertTrue("Unexpected exception: " + ex,
-          ex.getMessage().contains("does not exist."));
+          ex.getMessage().contains("is not found"));
     }
     assertFalse(container.exists());
 
@@ -165,6 +165,9 @@ public class ITestContainerChecks extends AbstractWasbTestWithTimeout {

hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release. (cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)

2018-06-11 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e5cfe6df -> 7c3dc3908


Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release.
(cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c3dc390
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c3dc390
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c3dc390

Branch: refs/heads/trunk
Commit: 7c3dc39083ef6608f8a8fe7699195d4d369ec5e4
Parents: 2e5cfe6
Author: Yongjun Zhang 
Authored: Sun Jun 10 23:07:24 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 15:13:18 2018 -0700

--
 .../markdown/release/3.0.3/CHANGES.3.0.3.md | 309 ++
 .../release/3.0.3/RELEASENOTES.3.0.3.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.3.xml  | 322 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 663 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c3dc390/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
new file mode 100644
index 0000000..4806543
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
@@ -0,0 +1,309 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.3 - 2018-05-31
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode |  Major | datanode, hdfs | Lukas Majercak | Lukas Majercak |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12455](https://issues.apache.org/jira/browse/HDFS-12455) | WebHDFS - Adding "snapshot enabled" status to ListStatus query result. |  Major | snapshots, webhdfs | Ajay Kumar | Ajay Kumar |
+| [HDFS-13062](https://issues.apache.org/jira/browse/HDFS-13062) | Provide support for JN to use separate journal disk per namespace |  Major | federation, journal-node | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-12933](https://issues.apache.org/jira/browse/HDFS-12933) | Improve logging when DFSStripedOutputStream failed to write some blocks |  Minor | erasure-coding | Xiao Chen | chencan |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-13175](https://issues.apache.org/jira/browse/HDFS-13175) | Add more information for checking argument in DiskBalancerVolume |  Minor | diskbalancer | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [MAPREDUCE-7061](https://issues.apache.org/jira/browse/MAPREDUCE-7061) | SingleCluster setup document needs to be updated |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HADOOP-15263](https://issues.apache.org/jira/browse/HADOOP-15263) | hadoop cloud-storage module to mark hadoop-common as provided; add azure-datalake |  Minor | build | Steve Loughran | Steve Loughran |
+| [MAPREDUCE-7060](https://issues.apache.org/jira/browse/MAPREDUCE-7060) | Cherry Pick PathOutputCommitter class/factory to branch-3.0 |  Minor | . | Steve Loughran | Steve Loughran |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-13170](https://issues.apache.org/jira/browse/HDFS-13170) | Port webhdfs unmaskedpermission parameter to HTTPFS |  Major | . | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-13225](https://issues.apache.org/jira/browse/HDFS-13225) | 
[4/5] hadoop git commit: Add 2.9.1 release notes and changes documents

2018-06-11 Thread yjzhangal
Add 2.9.1 release notes and changes documents

(cherry picked from commit f3f544b00475583c4c9fe52be0d2004390979bd0)
(cherry picked from commit a700d05f72b3695722f652626d35668f34e35285)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/faf7d5e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/faf7d5e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/faf7d5e3

Branch: refs/heads/branch-3
Commit: faf7d5e3f57594424eb3c6a3e2bc26b29bba81e0
Parents: e0c4972
Author: Sammi Chen 
Authored: Mon May 14 15:14:02 2018 +0800
Committer: Yongjun Zhang 
Committed: Mon Jun 11 15:10:10 2018 -0700

--
 .../markdown/release/2.9.1/CHANGES.2.9.1.md | 277 
 .../release/2.9.1/RELEASENOTES.2.9.1.md |  88 ++
 .../jdiff/Apache_Hadoop_HDFS_2.9.1.xml  | 312 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 678 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/faf7d5e3/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
new file mode 100644
index 0000000..c5e53f6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/2.9.1/CHANGES.2.9.1.md
@@ -0,0 +1,277 @@
+
+
+# "Apache Hadoop" Changelog
+
+## Release 2.9.1 - 2018-04-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics |  Major | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table |  Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath |  Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-12756](https://issues.apache.org/jira/browse/HADOOP-12756) | Incorporate Aliyun OSS file system implementation |  Major | fs, fs/oss | shimingfei | mingfei.shi |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [HADOOP-14964](https://issues.apache.org/jira/browse/HADOOP-14964) | AliyunOSS: backport Aliyun OSS module to branch-2 |  Major | fs/oss | Genmao Yu | SammiChen |
+| [YARN-6851](https://issues.apache.org/jira/browse/YARN-6851) | Capacity Scheduler: document configs for controlling # containers allowed to be allocated per node heartbeat |  Minor | . | Wei Yan | Wei Yan |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [YARN-7642](https://issues.apache.org/jira/browse/YARN-7642) | Add test case to verify context update after container promotion or demotion with or without 
[2/5] hadoop git commit: Update 3.0.1 jdiff file and jdiff stable api version (cherry picked from commit 4859cd7cc936d5fcf115a2f1cb06fe45a742ff5d)

2018-06-11 Thread yjzhangal
Update 3.0.1 jdiff file and jdiff stable api version
(cherry picked from commit 4859cd7cc936d5fcf115a2f1cb06fe45a742ff5d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9da84120
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9da84120
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9da84120

Branch: refs/heads/branch-3
Commit: 9da841208f5001ef1c48c3286ba0b80987b81aa4
Parents: c2001d2
Author: Lei Xu 
Authored: Fri Mar 23 11:48:36 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 15:09:02 2018 -0700

--
 .../jdiff/Apache_Hadoop_HDFS_3.0.1.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 2 files changed, 325 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da84120/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
new file mode 100644
index 0000000..91c8a6b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
@@ -0,0 +1,324 @@

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9da84120/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index f11e064..91de2c3 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
-    <jdiff.stable.api>3.0.0</jdiff.stable.api>
+    <jdiff.stable.api>3.0.1</jdiff.stable.api>
     <jdiff.stability>-unstable</jdiff.stability>




[1/5] hadoop git commit: Update releasenotes and changelogs for 3.0.1 release (cherry picked from commit 98d7a5aaef2bbef46e0e7b6c876490f9235c59f5)

2018-06-11 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 702d280c8 -> 4c3fb756a


Update releasenotes and changelogs for 3.0.1 release
(cherry picked from commit 98d7a5aaef2bbef46e0e7b6c876490f9235c59f5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2001d21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2001d21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2001d21

Branch: refs/heads/branch-3
Commit: c2001d21f1bb7fe6b178f36ff6776407bda7fe29
Parents: 702d280
Author: Lei Xu 
Authored: Fri Mar 23 11:43:09 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 15:08:45 2018 -0700

--
 .../markdown/release/3.0.1/CHANGES.3.0.1.md | 241 +++
 .../release/3.0.1/RELEASENOTES.3.0.1.md |  54 +
 2 files changed, 295 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2001d21/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
new file mode 100644
index 0000000..d24a8f4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
@@ -0,0 +1,241 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.1 - 2018-03-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12990](https://issues.apache.org/jira/browse/HDFS-12990) | Change default NameNode RPC port back to 8020 |  Critical | namenode | Xiao Chen | Xiao Chen |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [YARN-7414](https://issues.apache.org/jira/browse/YARN-7414) | FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() |  Minor | fairscheduler | Daniel Templeton | Soumabrata Chakraborty |
+| [HADOOP-15023](https://issues.apache.org/jira/browse/HADOOP-15023) | ValueQueue should also validate (lowWatermark \* numValues) \> 0 on construction |  Minor | . | Xiao Chen | Xiao Chen |
+| [HDFS-12814](https://issues.apache.org/jira/browse/HDFS-12814) | Add blockId when warning slow mirror/disk in BlockReceiver |  Trivial | hdfs | Jiandan Yang | Jiandan Yang |
+| [YARN-7524](https://issues.apache.org/jira/browse/YARN-7524) | Remove unused FairSchedulerEventLog |  Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [YARN-6483](https://issues.apache.org/jira/browse/YARN-6483) | Add nodes transitioning to DECOMMISSIONING state to the list of updated nodes returned to the AM |  Major | resourcemanager | Juan Rodríguez Hortalá | Juan Rodríguez Hortalá |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-12910](https://issues.apache.org/jira/browse/HDFS-12910) | Secure Datanode Starter should log the port when it fails to bind |  Minor | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12819](https://issues.apache.org/jira/browse/HDFS-12819) | Setting/Unsetting EC policy shows warning if the directory is not empty |  Minor | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-12927](https://issues.apache.org/jira/browse/HDFS-12927) | Update erasure coding doc to address unsupported APIs |  
[3/5] hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.

2018-06-11 Thread yjzhangal
Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.

(cherry picked from commit f6ecb76d0b919b9836600fe28ec9e637b223cd54)

Conflicts:
hadoop-project-dist/pom.xml
(cherry picked from commit 42e82f02812c38f2965bd5fccbf71bed6ff89992)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0c49726
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0c49726
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0c49726

Branch: refs/heads/branch-3
Commit: e0c49726e2501851fa98292de0f3738d1baab2ef
Parents: 9da8412
Author: Lei Xu 
Authored: Mon Apr 23 14:07:43 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 15:09:46 2018 -0700

--
 .../markdown/release/3.0.2/CHANGES.3.0.2.md |  31 ++
 .../release/3.0.2/RELEASENOTES.3.0.2.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.2.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 387 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0c49726/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
new file mode 100644
index 000..96953ee
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.2 - 2018-04-13
+
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | Apache Hadoop release 3.0.2 to fix deploying shaded jars in artifacts. |  Major | . | Lei (Eddy) Xu | Lei (Eddy) Xu |
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0c49726/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
new file mode 100644
index 000..5132bc0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop  3.0.2 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | *Major* 
| **Apache Hadoop release 3.0.2 to fix deploying shaded jars in artifacts.**
+
+Release Apache Hadoop 3.0.2 on the same code base as Hadoop 3.0.1, but deploy 
with shaded jars.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0c49726/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
new file mode 100644
index 000..b60de84
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
@@ -0,0 +1,324 @@
+[jdiff XML report Apache_Hadoop_HDFS_3.0.2.xml: 324 added lines of XML whose markup was stripped by the mail archive and is not recoverable here.]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0c49726/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 

[5/5] hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release. (cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)

2018-06-11 Thread yjzhangal
Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release.
(cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c3fb756
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c3fb756
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c3fb756

Branch: refs/heads/branch-3
Commit: 4c3fb756ad9762f056fb7d972bec3c69036a7bcc
Parents: faf7d5e
Author: Yongjun Zhang 
Authored: Sun Jun 10 23:07:24 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 15:10:44 2018 -0700

--
 .../markdown/release/3.0.3/CHANGES.3.0.3.md | 309 ++
 .../release/3.0.3/RELEASENOTES.3.0.3.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.3.xml  | 322 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 663 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c3fb756/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
new file mode 100644
index 000..4806543
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
@@ -0,0 +1,309 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.3 - 2018-05-31
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode |  Major | datanode, hdfs | Lukas Majercak | Lukas Majercak |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-12455](https://issues.apache.org/jira/browse/HDFS-12455) | WebHDFS - Adding "snapshot enabled" status to ListStatus query result. |  Major | snapshots, webhdfs | Ajay Kumar | Ajay Kumar |
+| [HDFS-13062](https://issues.apache.org/jira/browse/HDFS-13062) | Provide support for JN to use separate journal disk per namespace |  Major | federation, journal-node | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-12933](https://issues.apache.org/jira/browse/HDFS-12933) | Improve logging when DFSStripedOutputStream failed to write some blocks |  Minor | erasure-coding | Xiao Chen | chencan |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-13175](https://issues.apache.org/jira/browse/HDFS-13175) | Add more information for checking argument in DiskBalancerVolume |  Minor | diskbalancer | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [MAPREDUCE-7061](https://issues.apache.org/jira/browse/MAPREDUCE-7061) | SingleCluster setup document needs to be updated |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HADOOP-15263](https://issues.apache.org/jira/browse/HADOOP-15263) | hadoop cloud-storage module to mark hadoop-common as provided; add azure-datalake |  Minor | build | Steve Loughran | Steve Loughran |
+| [MAPREDUCE-7060](https://issues.apache.org/jira/browse/MAPREDUCE-7060) | Cherry Pick PathOutputCommitter class/factory to branch-3.0 |  Minor | . | Steve Loughran | Steve Loughran |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-13170](https://issues.apache.org/jira/browse/HDFS-13170) | Port webhdfs unmaskedpermission parameter to HTTPFS |  Major | . | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-13225](https://issues.apache.org/jira/browse/HDFS-13225) | StripeReader#checkMissingBlocks() 's IOException info is incomplete |  Major | erasure-coding, 

hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release. (cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)

2018-06-11 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f0150f024 -> 425fe4e21


Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release.
(cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/425fe4e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/425fe4e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/425fe4e2

Branch: refs/heads/branch-3.1
Commit: 425fe4e2179e6e155913c6f4ac90e86661e106c2
Parents: f0150f0
Author: Yongjun Zhang 
Authored: Sun Jun 10 23:07:24 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 14:31:46 2018 -0700

--
 .../markdown/release/3.0.3/CHANGES.3.0.3.md | 309 ++
 .../release/3.0.3/RELEASENOTES.3.0.3.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.3.xml  | 322 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 663 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/425fe4e2/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
new file mode 100644
index 000..4806543
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
@@ -0,0 +1,309 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.3 - 2018-05-31
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode |  Major | datanode, hdfs | Lukas Majercak | Lukas Majercak |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-12455](https://issues.apache.org/jira/browse/HDFS-12455) | WebHDFS - Adding "snapshot enabled" status to ListStatus query result. |  Major | snapshots, webhdfs | Ajay Kumar | Ajay Kumar |
+| [HDFS-13062](https://issues.apache.org/jira/browse/HDFS-13062) | Provide support for JN to use separate journal disk per namespace |  Major | federation, journal-node | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-12933](https://issues.apache.org/jira/browse/HDFS-12933) | Improve logging when DFSStripedOutputStream failed to write some blocks |  Minor | erasure-coding | Xiao Chen | chencan |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-13175](https://issues.apache.org/jira/browse/HDFS-13175) | Add more information for checking argument in DiskBalancerVolume |  Minor | diskbalancer | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [MAPREDUCE-7061](https://issues.apache.org/jira/browse/MAPREDUCE-7061) | SingleCluster setup document needs to be updated |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HADOOP-15263](https://issues.apache.org/jira/browse/HADOOP-15263) | hadoop cloud-storage module to mark hadoop-common as provided; add azure-datalake |  Minor | build | Steve Loughran | Steve Loughran |
+| [MAPREDUCE-7060](https://issues.apache.org/jira/browse/MAPREDUCE-7060) | Cherry Pick PathOutputCommitter class/factory to branch-3.0 |  Minor | . | Steve Loughran | Steve Loughran |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-13170](https://issues.apache.org/jira/browse/HDFS-13170) | Port webhdfs unmaskedpermission parameter to HTTPFS |  Major | . | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-13225](https://issues.apache.org/jira/browse/HDFS-13225) | 

hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release. (cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)

2018-06-11 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6eea6d28d -> 8202c334f


Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release.
(cherry picked from commit 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8202c334
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8202c334
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8202c334

Branch: refs/heads/branch-3.0
Commit: 8202c334fe789ef36b31c13dd636453fbafab40a
Parents: 6eea6d2
Author: Yongjun Zhang 
Authored: Sun Jun 10 23:07:24 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 14:28:44 2018 -0700

--
 .../markdown/release/3.0.3/CHANGES.3.0.3.md | 309 ++
 .../release/3.0.3/RELEASENOTES.3.0.3.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.3.xml  | 322 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 663 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8202c334/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
new file mode 100644
index 000..4806543
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
@@ -0,0 +1,309 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.3 - 2018-05-31
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode |  Major | datanode, hdfs | Lukas Majercak | Lukas Majercak |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-12455](https://issues.apache.org/jira/browse/HDFS-12455) | WebHDFS - Adding "snapshot enabled" status to ListStatus query result. |  Major | snapshots, webhdfs | Ajay Kumar | Ajay Kumar |
+| [HDFS-13062](https://issues.apache.org/jira/browse/HDFS-13062) | Provide support for JN to use separate journal disk per namespace |  Major | federation, journal-node | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-12933](https://issues.apache.org/jira/browse/HDFS-12933) | Improve logging when DFSStripedOutputStream failed to write some blocks |  Minor | erasure-coding | Xiao Chen | chencan |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-13175](https://issues.apache.org/jira/browse/HDFS-13175) | Add more information for checking argument in DiskBalancerVolume |  Minor | diskbalancer | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [MAPREDUCE-7061](https://issues.apache.org/jira/browse/MAPREDUCE-7061) | SingleCluster setup document needs to be updated |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HADOOP-15263](https://issues.apache.org/jira/browse/HADOOP-15263) | hadoop cloud-storage module to mark hadoop-common as provided; add azure-datalake |  Minor | build | Steve Loughran | Steve Loughran |
+| [MAPREDUCE-7060](https://issues.apache.org/jira/browse/MAPREDUCE-7060) | Cherry Pick PathOutputCommitter class/factory to branch-3.0 |  Minor | . | Steve Loughran | Steve Loughran |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-13170](https://issues.apache.org/jira/browse/HDFS-13170) | Port webhdfs unmaskedpermission parameter to HTTPFS |  Major | . | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-13225](https://issues.apache.org/jira/browse/HDFS-13225) | 

hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release.

2018-06-11 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.3 37fd7d752 -> 5a82f10e3


Update CHANGES, RELEASENOTES, and jdiff for 3.0.3 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a82f10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a82f10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a82f10e

Branch: refs/heads/branch-3.0.3
Commit: 5a82f10e3254bc0745c2dda6bcef888d3ff0d1c3
Parents: 37fd7d7
Author: Yongjun Zhang 
Authored: Sun Jun 10 23:07:24 2018 -0700
Committer: Yongjun Zhang 
Committed: Mon Jun 11 00:00:43 2018 -0700

--
 .../markdown/release/3.0.3/CHANGES.3.0.3.md | 309 ++
 .../release/3.0.3/RELEASENOTES.3.0.3.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.3.xml  | 322 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 663 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a82f10e/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
new file mode 100644
index 000..4806543
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.3/CHANGES.3.0.3.md
@@ -0,0 +1,309 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.3 - 2018-05-31
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-13283](https://issues.apache.org/jira/browse/HDFS-13283) | Percentage based Reserved Space Calculation for DataNode |  Major | datanode, hdfs | Lukas Majercak | Lukas Majercak |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---|:---|:---|:---|:---|:---|
+| [HDFS-12455](https://issues.apache.org/jira/browse/HDFS-12455) | WebHDFS - Adding "snapshot enabled" status to ListStatus query result. |  Major | snapshots, webhdfs | Ajay Kumar | Ajay Kumar |
+| [HDFS-13062](https://issues.apache.org/jira/browse/HDFS-13062) | Provide support for JN to use separate journal disk per namespace |  Major | federation, journal-node | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-12933](https://issues.apache.org/jira/browse/HDFS-12933) | Improve logging when DFSStripedOutputStream failed to write some blocks |  Minor | erasure-coding | Xiao Chen | chencan |
+| [HADOOP-13972](https://issues.apache.org/jira/browse/HADOOP-13972) | ADLS to support per-store configuration |  Major | fs/adl | John Zhuge | Sharad Sonker |
+| [YARN-7813](https://issues.apache.org/jira/browse/YARN-7813) | Capacity Scheduler Intra-queue Preemption should be configurable for each queue |  Major | capacity scheduler, scheduler preemption | Eric Payne | Eric Payne |
+| [HDFS-13175](https://issues.apache.org/jira/browse/HDFS-13175) | Add more information for checking argument in DiskBalancerVolume |  Minor | diskbalancer | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-11187](https://issues.apache.org/jira/browse/HDFS-11187) | Optimize disk access for last partial chunk checksum of Finalized replica |  Major | datanode | Wei-Chiu Chuang | Gabor Bota |
+| [MAPREDUCE-7061](https://issues.apache.org/jira/browse/MAPREDUCE-7061) | SingleCluster setup document needs to be updated |  Major | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HADOOP-15263](https://issues.apache.org/jira/browse/HADOOP-15263) | hadoop cloud-storage module to mark hadoop-common as provided; add azure-datalake |  Minor | build | Steve Loughran | Steve Loughran |
+| [MAPREDUCE-7060](https://issues.apache.org/jira/browse/MAPREDUCE-7060) | Cherry Pick PathOutputCommitter class/factory to branch-3.0 |  Minor | . | Steve Loughran | Steve Loughran |
+| [HADOOP-15279](https://issues.apache.org/jira/browse/HADOOP-15279) | increase maven heap size recommendations |  Minor | build, documentation, test | Allen Wittenauer | Allen Wittenauer |
+| [HDFS-13170](https://issues.apache.org/jira/browse/HDFS-13170) | Port webhdfs unmaskedpermission parameter to HTTPFS |  Major | . | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-13225](https://issues.apache.org/jira/browse/HDFS-13225) | StripeReader#checkMissingBlocks() 's IOException info is incomplete |  

[1/2] hadoop git commit: HDFS-13653. Make dfs.client.failover.random.order a per nameservice configuration. Contributed by Ekanth Sethuramalingam.

2018-06-11 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4049fa37a -> 805939079
  refs/heads/branch-2.9 0aa4067e9 -> 88141548d


HDFS-13653. Make dfs.client.failover.random.order a per nameservice 
configuration. Contributed by Ekanth Sethuramalingam.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80593907
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80593907
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80593907

Branch: refs/heads/branch-2
Commit: 8059390798b63589c313415a4bc175293edac529
Parents: 4049fa3
Author: Inigo Goiri 
Authored: Mon Jun 11 13:47:08 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 13:47:08 2018 -0700

--
 .../ha/ConfiguredFailoverProxyProvider.java |  29 +-
 .../ha/TestConfiguredFailoverProxyProvider.java | 264 +++
 .../src/main/resources/hdfs-default.xml |  12 +
 .../hadoop/tools/TestHdfsConfigFields.java  |   1 +
 4 files changed, 303 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80593907/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 58f4943..96722fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -94,9 +94,7 @@ public class ConfiguredFailoverProxyProvider<T> extends
       proxies.add(new AddressRpcProxyPair<T>(address));
     }
     // Randomize the list to prevent all clients pointing to the same one
-    boolean randomized = conf.getBoolean(
-        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
-        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    boolean randomized = getRandomOrder(conf, uri);
     if (randomized) {
       Collections.shuffle(proxies);
     }
@@ -111,6 +109,31 @@ public class ConfiguredFailoverProxyProvider<T> extends
     }
   }
 
+  /**
+   * Check whether random order is configured for failover proxy provider
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode/nameservice
+   * @return random order configuration
+   */
+  private static boolean getRandomOrder(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    String configKeyWithHost = HdfsClientConfigKeys.Failover.RANDOM_ORDER
+        + "." + host;
+
+    if (conf.get(configKeyWithHost) != null) {
+      return conf.getBoolean(
+          configKeyWithHost,
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    }
+
+    return conf.getBoolean(
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  }
+
   @Override
   public Class<T> getInterface() {
     return xface;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80593907/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
new file mode 100644
index 000..d7a5db6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 

[2/2] hadoop git commit: HDFS-13653. Make dfs.client.failover.random.order a per nameservice configuration. Contributed by Ekanth Sethuramalingam.

2018-06-11 Thread inigoiri
HDFS-13653. Make dfs.client.failover.random.order a per nameservice 
configuration. Contributed by Ekanth Sethuramalingam.

(cherry picked from commit 8059390798b63589c313415a4bc175293edac529)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88141548
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88141548
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88141548

Branch: refs/heads/branch-2.9
Commit: 88141548d61c966d6a7ee1b2d413274ff6796ab9
Parents: 0aa4067
Author: Inigo Goiri 
Authored: Mon Jun 11 13:47:08 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 13:48:02 2018 -0700

--
 .../ha/ConfiguredFailoverProxyProvider.java |  29 +-
 .../ha/TestConfiguredFailoverProxyProvider.java | 264 +++
 .../src/main/resources/hdfs-default.xml |  12 +
 .../hadoop/tools/TestHdfsConfigFields.java  |   1 +
 4 files changed, 303 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88141548/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 58f4943..96722fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -94,9 +94,7 @@ public class ConfiguredFailoverProxyProvider<T> extends
       proxies.add(new AddressRpcProxyPair<T>(address));
     }
     // Randomize the list to prevent all clients pointing to the same one
-    boolean randomized = conf.getBoolean(
-        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
-        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    boolean randomized = getRandomOrder(conf, uri);
     if (randomized) {
       Collections.shuffle(proxies);
     }
@@ -111,6 +109,31 @@ public class ConfiguredFailoverProxyProvider<T> extends
     }
   }
 
+  /**
+   * Check whether random order is configured for failover proxy provider
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode/nameservice
+   * @return random order configuration
+   */
+  private static boolean getRandomOrder(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    String configKeyWithHost = HdfsClientConfigKeys.Failover.RANDOM_ORDER
+        + "." + host;
+
+    if (conf.get(configKeyWithHost) != null) {
+      return conf.getBoolean(
+          configKeyWithHost,
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    }
+
+    return conf.getBoolean(
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  }
+
   @Override
   public Class<T> getInterface() {
     return xface;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88141548/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
new file mode 100644
index 000..d7a5db6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language 
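
A note for readers of HDFS-13653: getRandomOrder() above resolves the per-nameservice key by suffixing the base key with the host of the NameNode URI, so for a logical HA URI the suffix is the nameservice name. A minimal sketch (the nameservice name "ns1" is hypothetical):

    // Hedged sketch, not part of the patch: enable random failover order
    // for the hypothetical nameservice "ns1" only.
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean("dfs.client.failover.random.order.ns1", true);
    // Clients of hdfs://ns1 now shuffle their NameNode proxy list; other
    // nameservices still follow the global dfs.client.failover.random.order.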

hadoop git commit: HDDS-136: Rename dbPath, containerFilePath. Contributed by Bharat Viswanadham

2018-06-11 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 0e437f9b1 -> 7e228e54c


HDDS-136: Rename dbPath, containerFilePath. Contributed by Bharat Viswanadham


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e228e54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e228e54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e228e54

Branch: refs/heads/HDDS-48
Commit: 7e228e54c56598e263d66e3ef74476e12a3b1f30
Parents: 0e437f9
Author: Bharat Viswanadham 
Authored: Mon Jun 11 13:14:03 2018 -0700
Committer: Bharat Viswanadham 
Committed: Mon Jun 11 13:14:03 2018 -0700

--
 .../common/impl/KeyValueContainerData.java  | 34 ++--
 .../container/common/impl/KeyValueYaml.java |  9 +++---
 .../common/TestKeyValueContainerData.java   |  8 ++---
 .../container/common/impl/TestKeyValueYaml.java | 16 -
 .../test/resources/additionalfields.container   |  7 ++--
 .../src/test/resources/incorrect.container  |  7 ++--
 6 files changed, 41 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e228e54/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
index 57b5264..0889913 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueContainerData.java
@@ -30,11 +30,11 @@ import java.io.IOException;
  */
 public class KeyValueContainerData extends ContainerData {
 
-  // Path to Level DB/RocksDB Store.
-  private String dbPath;
+  // Path to Container metadata Level DB/RocksDB Store and .container file.
+  private String metadataPath;
 
-  // Path to Physical file system where container and checksum are stored.
-  private String containerFilePath;
+  // Path to Physical file system where chunks are stored.
+  private String chunksPath;
 
   //Type of DB used to store key to chunks mapping
   private String containerDBType;
@@ -64,37 +64,37 @@ public class KeyValueContainerData extends ContainerData {
     this.numPendingDeletionBlocks = 0;
   }
   /**
-   * Returns path.
+   * Returns container metadata path.
    *
    * @return - path
    */
-  public String getDbPath() {
-    return dbPath;
+  public String getMetadataPath() {
+    return metadataPath;
   }
 
   /**
-   * Sets path.
+   * Sets container metadata path.
    *
    * @param path - String.
    */
-  public void setDbPath(String path) {
-    this.dbPath = path;
+  public void setMetadataPath(String path) {
+    this.metadataPath = path;
   }
 
   /**
-   * Get container file path.
+   * Get chunks path.
    * @return - Physical path where container file and checksum is stored.
    */
-  public String getContainerFilePath() {
-    return containerFilePath;
+  public String getChunksPath() {
+    return chunksPath;
   }
 
   /**
-   * Set container Path.
-   * @param containerPath - File path.
+   * Set chunks Path.
+   * @param chunkPath - File path.
    */
-  public void setContainerFilePath(String containerPath) {
-    this.containerFilePath = containerPath;
+  public void setChunksPath(String chunkPath) {
+    this.chunksPath = chunkPath;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e228e54/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
index b7ce0d9..d22092c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyValueYaml.java
@@ -144,8 +144,8 @@ public final class KeyValueYaml {
   // When a new field needs to be added, it needs to be added here.
   if (name.equals("containerType") || name.equals("containerId") ||
   name.equals("layOutVersion") || name.equals("state") ||
-  name.equals("metadata") || name.equals("dbPath") ||
-  name.equals("containerFilePath") || name.equals(
+  name.equals("metadata") || 
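
The rename is mechanical; here is a small hedged sketch exercising only the accessors shown in the diff above (the method names come from the patch, the paths are invented):

    // Sketch: the new accessor names after HDDS-136; paths are illustrative.
    void describe(KeyValueContainerData data) {
      data.setMetadataPath("/data/ozone/meta");    // formerly setDbPath(...)
      data.setChunksPath("/data/ozone/chunks");    // formerly setContainerFilePath(...)
      System.out.println(data.getMetadataPath() + " " + data.getChunksPath());
    }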

[12/16] hadoop git commit: YARN-8322. Change log level when there is an IOException when the allocation file is loaded. (Szilard Nemeth via Haibo Chen)

2018-06-11 Thread inigoiri
YARN-8322. Change log level when there is an IOException when the allocation 
file is loaded. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/676dcfff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/676dcfff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/676dcfff

Branch: refs/heads/HADOOP-15461
Commit: 676dc575fdf1c4b49aeae4e000bd60ca0a83
Parents: c190ac2
Author: Haibo Chen 
Authored: Mon Jun 11 11:16:21 2018 -0700
Committer: Haibo Chen 
Committed: Mon Jun 11 11:16:21 2018 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/676dcfff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index e541ab7..32cb236 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -109,7 +109,7 @@ public class AllocationFileLoaderService extends AbstractService {
   @Override
   public void serviceInit(Configuration conf) throws Exception {
     this.allocFile = getAllocationFile(conf);
-    if(this.allocFile != null) {
+    if (this.allocFile != null) {
       this.fs = allocFile.getFileSystem(conf);
       reloadThread = new Thread(() -> {
         while (running) {
@@ -138,7 +138,7 @@ public class AllocationFileLoaderService extends AbstractService {
             lastReloadAttemptFailed = true;
           }
         } catch (IOException e) {
-          LOG.info("Exception while loading allocation file: " + e);
+          LOG.error("Exception while loading allocation file: " + e);
         }
         try {
           Thread.sleep(reloadIntervalMs);
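
Note that the statement still concatenates the exception into the message, which drops the stack trace; a common alternative (not part of this patch) passes the throwable as a separate argument:

    // Alternative form, not in the patch: logs the message plus the stack trace.
    LOG.error("Exception while loading allocation file", e);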


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/16] hadoop git commit: HDFS-13664. Refactor ConfiguredFailoverProxyProvider to make inheritance easier. Contributed by Chao Sun.

2018-06-11 Thread inigoiri
HDFS-13664. Refactor ConfiguredFailoverProxyProvider to make inheritance 
easier. Contributed by Chao Sun.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fba1c42a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fba1c42a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fba1c42a

Branch: refs/heads/HADOOP-15461
Commit: fba1c42adc1c8ae57951e1865ec2ab05c8707bdf
Parents: cf41083
Author: Chao Sun 
Authored: Fri Jun 8 16:36:42 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Jun 8 16:36:42 2018 -0700

--
 .../namenode/ha/ConfiguredFailoverProxyProvider.java  | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fba1c42a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index e9c8791..58f4943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -52,11 +52,11 @@ public class ConfiguredFailoverProxyProvider<T> extends
   protected final Configuration conf;
   protected final List<AddressRpcProxyPair<T>> proxies =
       new ArrayList<AddressRpcProxyPair<T>>();
-  private final UserGroupInformation ugi;
+  protected final UserGroupInformation ugi;
   protected final Class<T> xface;
 
   private int currentProxyIndex = 0;
-  private final HAProxyFactory<T> factory;
+  protected final HAProxyFactory<T> factory;
 
   public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
       Class<T> xface, HAProxyFactory<T> factory) {
@@ -122,6 +122,10 @@ public class ConfiguredFailoverProxyProvider<T> extends
   @Override
   public synchronized ProxyInfo<T> getProxy() {
     AddressRpcProxyPair<T> current = proxies.get(currentProxyIndex);
+    return getProxy(current);
+  }
+
+  protected ProxyInfo<T> getProxy(AddressRpcProxyPair<T> current) {
     if (current.namenode == null) {
       try {
         current.namenode = factory.createProxy(conf,
@@ -147,7 +151,7 @@ public class ConfiguredFailoverProxyProvider<T> extends
    * A little pair object to store the address and connected RPC proxy object to
    * an NN. Note that {@link AddressRpcProxyPair#namenode} may be null.
    */
-  private static class AddressRpcProxyPair<T> {
+  protected static class AddressRpcProxyPair<T> {
     public final InetSocketAddress address;
     public T namenode;
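
With ugi, factory, AddressRpcProxyPair and the pair-level getProxy() now visible to subclasses, proxy selection can be overridden without copying the class. A hypothetical subclass (illustrative only, not Hadoop code):

    // Sketch of what the refactor enables; the class name is invented.
    public class ShuffledFailoverProxyProvider<T>
        extends ConfiguredFailoverProxyProvider<T> {
      private final java.util.Random random = new java.util.Random();

      public ShuffledFailoverProxyProvider(Configuration conf, URI uri,
          Class<T> xface, HAProxyFactory<T> factory) {
        super(conf, uri, xface, factory);
      }

      @Override
      public synchronized ProxyInfo<T> getProxy() {
        // Pick any pair and reuse the inherited lazy proxy construction.
        return getProxy(proxies.get(random.nextInt(proxies.size())));
      }
    }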
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/16] hadoop git commit: YARN-8323. FairScheduler.allocConf should be declared as volatile. (Szilard Nemeth via Haibo Chen)

2018-06-11 Thread inigoiri
YARN-8323. FairScheduler.allocConf should be declared as volatile. (Szilard 
Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c190ac2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c190ac2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c190ac2b

Branch: refs/heads/HADOOP-15461
Commit: c190ac2be88e574b3322cdc73a7c0af0cef708b2
Parents: 18201b8
Author: Haibo Chen 
Authored: Mon Jun 11 11:12:44 2018 -0700
Committer: Haibo Chen 
Committed: Mon Jun 11 11:12:44 2018 -0700

--
 .../yarn/server/resourcemanager/scheduler/fair/FairScheduler.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c190ac2b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 557e684..eb9f6af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -199,7 +199,7 @@ public class FairScheduler extends
 
   private AllocationFileLoaderService allocsLoader;
   @VisibleForTesting
-  AllocationConfiguration allocConf;
+  volatile AllocationConfiguration allocConf;
 
   // Container size threshold for making a reservation.
   @VisibleForTesting
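
allocConf is written by the AllocationFileLoaderService reload thread and read by scheduler threads, so without volatile a reader may never observe a reload. The hazard in miniature (generic sketch, not Hadoop code):

    // Sketch: without 'volatile' the reader thread may legally cache the
    // old reference forever under the Java memory model.
    class ConfHolder {
      volatile Object conf = new Object();       // the YARN-8323 fix, in miniature
      void reload(Object next) { conf = next; }  // writer: the reload thread
      Object read() { return conf; }             // readers: scheduler threads
    }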


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/16] hadoop git commit: HDFS-12670. can't renew HDFS tokens with only the hdfs client jar. Contributed by Arpit Agarwal.

2018-06-11 Thread inigoiri
HDFS-12670. can't renew HDFS tokens with only the hdfs client jar. Contributed 
by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/000a6783
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/000a6783
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/000a6783

Branch: refs/heads/HADOOP-15461
Commit: 000a67839666bf7cb39d3955757bb05fa95f1b18
Parents: fba1c42
Author: Arpit Agarwal 
Authored: Fri Jun 8 17:57:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Jun 8 17:57:12 2018 -0700

--
 ...rg.apache.hadoop.security.token.TokenIdentifier | 17 +
 .../org.apache.hadoop.security.token.TokenRenewer  | 16 
 ...rg.apache.hadoop.security.token.TokenIdentifier | 17 -
 .../org.apache.hadoop.security.token.TokenRenewer  | 16 
 4 files changed, 33 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/000a6783/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 000..b6b6171
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,17 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$WebHdfsDelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$SWebHdfsDelegationTokenIdentifier

http://git-wip-us.apache.org/repos/asf/hadoop/blob/000a6783/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
new file mode 100644
index 000..7efd684
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
@@ -0,0 +1,16 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.hdfs.DFSClient$Renewer
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer
+org.apache.hadoop.hdfs.web.TokenAspect$TokenManager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/000a6783/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
deleted file mode 100644
index b6b6171..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-#   Licensed under the Apache License, Version 2.0 (the 
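
The moved files are java.util.ServiceLoader manifests, so token renewers are only discoverable from jars that actually ship the manifest; that is why they belong in hadoop-hdfs-client. Roughly how the lookup works (a simplified sketch of the mechanism, not the exact Hadoop source):

    // Simplified: how a renewer for a given token kind is discovered.
    ServiceLoader<TokenRenewer> renewers = ServiceLoader.load(TokenRenewer.class);
    for (TokenRenewer renewer : renewers) {
      if (renewer.handleKind(token.getKind())) {
        return renewer;  // first renewer that claims this token kind
      }
    }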

[03/16] hadoop git commit: HADOOP-15482. Upgrade jackson-databind to version 2.9.5. Contributed by Lokesh Jain.

2018-06-11 Thread inigoiri
HADOOP-15482. Upgrade jackson-databind to version 2.9.5. Contributed by Lokesh 
Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c42dcc7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c42dcc7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c42dcc7c

Branch: refs/heads/HADOOP-15461
Commit: c42dcc7c47340d517563890269c6c112996e8897
Parents: 3b88fe2
Author: Jitendra Pandey 
Authored: Thu Jun 7 23:00:26 2018 -0700
Committer: Jitendra Pandey 
Committed: Thu Jun 7 23:00:26 2018 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c42dcc7c/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8edfd76..8cb5bfc 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -69,7 +69,7 @@
 
 
     <jackson.version>1.9.13</jackson.version>
-    <jackson2.version>2.9.4</jackson2.version>
+    <jackson2.version>2.9.5</jackson2.version>
 
 
     <slf4j.version>1.7.25</slf4j.version>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/16] hadoop git commit: HADOOP-15520. Add tests for various org.apache.hadoop.util classes. Contributed by Arash Nabili

2018-06-11 Thread inigoiri
HADOOP-15520. Add tests for various org.apache.hadoop.util classes.
Contributed by Arash Nabili


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef0118b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef0118b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef0118b9

Branch: refs/heads/HADOOP-15461
Commit: ef0118b91e384b9a6d96c2ae64480d9acf5aa6fb
Parents: 000a678
Author: Steve Loughran 
Authored: Sat Jun 9 15:33:30 2018 +0100
Committer: Steve Loughran 
Committed: Sat Jun 9 15:33:38 2018 +0100

--
 .../util/TestCloseableReferenceCount.java   |  91 +
 .../hadoop/util/TestIntrusiveCollection.java| 193 +++
 .../hadoop/util/TestLimitInputStream.java   |  74 +++
 .../java/org/apache/hadoop/util/TestShell.java  |   8 +
 .../org/apache/hadoop/util/TestStringUtils.java |  27 +++
 .../hadoop/util/TestUTF8ByteArrayUtils.java |  57 ++
 6 files changed, 450 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef0118b9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
new file mode 100644
index 000..31e1899
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.nio.channels.ClosedChannelException;
+
+import org.junit.Test;
+
+import org.apache.hadoop.test.HadoopTestBase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestCloseableReferenceCount extends HadoopTestBase {
+  @Test
+  public void testReference() throws ClosedChannelException {
+    CloseableReferenceCount clr = new CloseableReferenceCount();
+    clr.reference();
+    assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+  }
+
+  @Test
+  public void testUnreference() throws ClosedChannelException {
+    CloseableReferenceCount clr = new CloseableReferenceCount();
+    clr.reference();
+    clr.reference();
+    assertFalse("New reference count should not equal STATUS_CLOSED_MASK",
+        clr.unreference());
+    assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+  }
+
+  @Test
+  public void testUnreferenceCheckClosed() throws ClosedChannelException {
+    CloseableReferenceCount clr = new CloseableReferenceCount();
+    clr.reference();
+    clr.reference();
+    clr.unreferenceCheckClosed();
+    assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+  }
+
+  @Test
+  public void testSetClosed() throws ClosedChannelException {
+    CloseableReferenceCount clr = new CloseableReferenceCount();
+    assertTrue("Reference count should be open", clr.isOpen());
+    clr.setClosed();
+    assertFalse("Reference count should be closed", clr.isOpen());
+  }
+
+  @Test(expected = ClosedChannelException.class)
+  public void testReferenceClosedReference() throws ClosedChannelException {
+    CloseableReferenceCount clr = new CloseableReferenceCount();
+    clr.setClosed();
+    assertFalse("Reference count should be closed", clr.isOpen());
+    clr.reference();
+  }
+
+  @Test(expected = ClosedChannelException.class)
+  public void testUnreferenceClosedReference() throws ClosedChannelException {
+    CloseableReferenceCount clr = new CloseableReferenceCount();
+    clr.reference();
+    clr.setClosed();
+    assertFalse("Reference count should be closed", clr.isOpen());
+    clr.unreferenceCheckClosed();
+  }
+
+  @Test(expected = ClosedChannelException.class)
+  public void testDoubleClose() throws 
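
The tests above trace the intended CloseableReferenceCount protocol; a typical guard looks like this (a sketch based only on the API exercised above):

    // Sketch: reference() throws ClosedChannelException once setClosed()
    // has been called, so closed resources reject new users.
    CloseableReferenceCount refCount = new CloseableReferenceCount();
    refCount.reference();
    try {
      // ... use the guarded resource ...
    } finally {
      refCount.unreferenceCheckClosed();
    }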

[05/16] hadoop git commit: HDFS-13642. Creating a file with block size smaller than EC policy's cell size should fail.

2018-06-11 Thread inigoiri
HDFS-13642. Creating a file with block size smaller than EC policy's cell size 
should fail.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf410831
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf410831
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf410831

Branch: refs/heads/HADOOP-15461
Commit: cf4108313da83e28d07676078a33016ec8856ff6
Parents: a127244
Author: Xiao Chen 
Authored: Fri Jun 8 15:13:38 2018 -0700
Committer: Xiao Chen 
Committed: Fri Jun 8 15:14:11 2018 -0700

--
 .../server/namenode/FSDirErasureCodingOp.java   |  23 +++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  10 ++--
 .../hdfs/server/namenode/FSNamesystem.java  |  21 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   5 ++--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  16 +
 .../hdfs/TestErasureCodingExerciseAPIs.java |   2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |   2 +-
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 7909 -> 7909 bytes
 .../src/test/resources/editsStored.xml  |   2 +-
 9 files changed, 58 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf410831/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 3a32db4..7160b86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
@@ -344,16 +345,28 @@ final class FSDirErasureCodingOp {
   }
 
   /**
-   * Check if the file or directory has an erasure coding policy.
+   * Get the erasure coding policy information for specified path and policy
+   * name. If ec policy name is given, it will be parsed and the corresponding
+   * policy will be returned. Otherwise, get the policy from the parents of the
+   * iip.
    *
    * @param fsn namespace
+   * @param ecPolicyName the ec policy name
    * @param iip inodes in the path containing the file
-   * @return Whether the file or directory has an erasure coding policy.
+   * @return {@link ErasureCodingPolicy}, or null if no policy is found
    * @throws IOException
    */
-  static boolean hasErasureCodingPolicy(final FSNamesystem fsn,
-      final INodesInPath iip) throws IOException {
-    return unprotectedGetErasureCodingPolicy(fsn, iip) != null;
+  static ErasureCodingPolicy getErasureCodingPolicy(FSNamesystem fsn,
+      String ecPolicyName, INodesInPath iip) throws IOException {
+    ErasureCodingPolicy ecPolicy;
+    if (!StringUtils.isEmpty(ecPolicyName)) {
+      ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
+          fsn, ecPolicyName);
+    } else {
+      ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
+          fsn, iip);
+    }
+    return ecPolicy;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf410831/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 8f34e1c..03c349c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.AddBlockFlag;
@@ -543,13 +542,8 @@ class FSDirWriteFileOp {
   boolean isStriped = false;
   ErasureCodingPolicy ecPolicy = null;
   if (!shouldReplicate) {
-

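From a client's perspective, the new validation means a create call with an undersized block size is rejected up front. A hedged sketch of the expected behavior (the /ec path and its RS-6-3-1024k policy with a 1 MB cell size are assumptions, not shown in the diff):

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class SmallBlockSizeSketch {
  static void demo(DistributedFileSystem dfs) throws IOException {
    long blockSize = 512 * 1024; // 512 KB, below the assumed 1 MB cell size
    try {
      dfs.create(new Path("/ec/file"), true, 4096, (short) 1, blockSize)
          .close();
    } catch (IOException expected) {
      // After HDFS-13642 the NameNode fails the create instead of
      // producing a striped file it could never reconstruct.
    }
  }
}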
[01/16] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter) [Forced Update!]

2018-06-11 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-15461 b59400dac -> ae9d83ac6 (forced update)


Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/351cf87c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/351cf87c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/351cf87c

Branch: refs/heads/HADOOP-15461
Commit: 351cf87c92872d90f62c476f85ae4d02e485769c
Parents: d5eca1a
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Robert Kanter 
Committed: Thu Jun 7 17:09:34 2018 -0700

--
 .../impl/container-executor.c   | 54 ++--
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md |  2 +-
 4 files changed, 55 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/351cf87c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1b8842a..baf0e8b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -73,6 +73,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", 
"mapred", "hdfs", "bin", 0}
 
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
 
 static const char* PROC_PATH = "/proc";
 
@@ -482,6 +483,12 @@ int is_tc_support_enabled() {
         DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
+int is_mount_cgroups_support_enabled() {
+    return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+                              DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED,
+                              &executor_cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -2346,20 +2353,25 @@ void chown_dir_contents(const char *dir_path, uid_t uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
     return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
     while ((ep = readdir(dp)) != NULL) {
-      stpncpy(buf, ep->d_name, strlen(ep->d_name));
-      buf[strlen(ep->d_name)] = '\0';
-      change_owner(path_tmp, uid, gid);
+      if (strcmp(ep->d_name, ".") != 0 &&
+          strcmp(ep->d_name, "..") != 0 &&
+          strstr(ep->d_name, "..") == NULL) {
+        int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+        if (result > 0 && result < len) {
+          change_owner(path_tmp, uid, gid);
+        } else {
+          fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, ep->d_name);
+        }
+      }
     }
     closedir(dp);
   }
@@ -2383,11 +2395,16 @@ int mount_cgroup(const char *pair, const char *hierarchy) {
   char *mount_path = malloc(len);
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
-  struct stat sb;
 
   if (controller == NULL || mount_path == NULL) {
     fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n");
     result = OUT_OF_MEMORY;
+    goto cleanup;
+  }
+  if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) {
+    fprintf(LOGFILE, "Unsupported cgroup hierarchy path detected.\n");
+    result = INVALID_COMMAND_PROVIDED;
+    goto cleanup;
   }
   if (get_kv_key(pair, controller, len) < 0 ||
       get_kv_value(pair, mount_path, len) < 0) {
@@ -2395,13 +2412,10 @@ int mount_cgroup(const char *pair, const char *hierarchy) {
         pair);
     result = -1;
   } else {
-    if (stat(mount_path, &sb) != 0) {
-      // Create mount point, if it does not exist
-      const mode_t mount_perms = S_IRWXU | S_IRGRP | S_IXGRP;
-      if (mkdirs(mount_path, mount_perms) == 0) {
-        fprintf(LOGFILE, "Failed to create cgroup mount point %s at %s\n",
-            controller, mount_path);
-      }
+    if (strstr(mount_path, "..") != NULL) {
+      fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+      result = INVALID_COMMAND_PROVIDED;
+      goto cleanup;
+    }

[09/16] hadoop git commit: HDFS-13667:Typo: Marking all datandoes as stale. Contributed by Nanda Kumar

2018-06-11 Thread inigoiri
HDFS-13667:Typo: Marking all datandoes as stale. Contributed by Nanda Kumar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccfb816d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccfb816d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccfb816d

Branch: refs/heads/HADOOP-15461
Commit: ccfb816d39878abf4172933327d788c59b9eb082
Parents: ef0118b
Author: Bharat Viswanadham 
Authored: Sat Jun 9 16:39:09 2018 -0700
Committer: Bharat Viswanadham 
Committed: Sat Jun 9 16:43:03 2018 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccfb816d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index e6cd513..9ebc693 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1834,7 +1834,7 @@ public class DatanodeManager {
   }
   
   public void markAllDatanodesStale() {
-    LOG.info("Marking all datandoes as stale");
+    LOG.info("Marking all datanodes as stale");
     synchronized (this) {
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         for (DatanodeStorageInfo storage : dn.getStorageInfos()) {





[13/16] hadoop git commit: YARN-8321. AllocationFileLoaderService.getAllocationFile() should be declared as VisibleForTest. (Szilard Nemeth via Haibo Chen)

2018-06-11 Thread inigoiri
YARN-8321. AllocationFileLoaderService.getAllocationFile() should be declared 
as VisibleForTest. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/180b3c96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/180b3c96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/180b3c96

Branch: refs/heads/HADOOP-15461
Commit: 180b3c960bb693a68431c677d8c8b18821fb4361
Parents: 676dcff
Author: Haibo Chen 
Authored: Mon Jun 11 11:18:44 2018 -0700
Committer: Haibo Chen 
Committed: Mon Jun 11 11:19:33 2018 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/180b3c96/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 32cb236..56cc887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -181,7 +181,8 @@ public class AllocationFileLoaderService extends AbstractService {
    * path is relative, it is searched for in the
    * classpath, but loaded like a regular File.
    */
-  public Path getAllocationFile(Configuration conf)
+  @VisibleForTesting
+  Path getAllocationFile(Configuration conf)
       throws UnsupportedFileSystemException {
     String allocFilePath = conf.get(FairSchedulerConfiguration.ALLOCATION_FILE,
         FairSchedulerConfiguration.DEFAULT_ALLOCATION_FILE);


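Why this does not break the existing tests: with the method now package-private, only callers in org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair retain access, which is where the fair scheduler tests live. A hedged sketch (the caller class below is invented for illustration):

package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;

// Hypothetical same-package caller, for illustration only.
class AllocationFileAccessSketch {
  Path resolve(AllocationFileLoaderService svc, Configuration conf)
      throws UnsupportedFileSystemException {
    return svc.getAllocationFile(conf); // legal: same package, no annotation magic
  }
}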



[02/16] hadoop git commit: YARN-8359. Exclude containermanager.linux test classes on Windows. Contributed by Jason Lowe.

2018-06-11 Thread inigoiri
YARN-8359. Exclude containermanager.linux test classes on Windows. Contributed 
by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b88fe25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b88fe25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b88fe25

Branch: refs/heads/HADOOP-15461
Commit: 3b88fe25baf130cd7a77590f9ded5b0bf028ef75
Parents: 351cf87
Author: Inigo Goiri 
Authored: Thu Jun 7 17:09:31 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Jun 7 17:13:03 2018 -0700

--
 .../hadoop-yarn-server-nodemanager/pom.xml  | 21 
 1 file changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b88fe25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 5146820..26a5220 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -294,6 +294,27 @@
 
   
 
+    <profile>
+      <id>native-win</id>
+      <activation>
+        <os>
+          <family>Windows</family>
+        </os>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <excludes>
+                <exclude>org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.**</exclude>
+              </excludes>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   
 
   





[04/16] hadoop git commit: HDDS-157. Upgrade common-langs version to 3.7 in HDDS and Ozone. Contributed by Takanobu Asanuma.

2018-06-11 Thread inigoiri
HDDS-157. Upgrade common-langs version to 3.7 in HDDS and Ozone.
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1272448
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1272448
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1272448

Branch: refs/heads/HADOOP-15461
Commit: a1272448bfa2f1a159d948b8635558e053b7be78
Parents: c42dcc7
Author: Anu Engineer 
Authored: Fri Jun 8 10:27:01 2018 -0700
Committer: Anu Engineer 
Committed: Fri Jun 8 10:27:01 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/XceiverClientHandler.java | 2 +-
 .../src/main/java/org/apache/hadoop/hdds/client/BlockID.java | 2 +-
 .../common/states/endpoint/RegisterEndpointTask.java | 2 +-
 .../apache/hadoop/ozone/client/rest/response/KeyInfo.java| 4 ++--
 .../java/org/apache/hadoop/ozone/web/response/KeyInfo.java   | 4 ++--
 .../hadoop/ozone/TestStorageContainerManagerHelper.java  | 2 +-
 .../apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java   | 2 +-
 .../apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java | 2 +-
 .../org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java | 2 +-
 .../org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java  | 2 +-
 .../hadoop/ozone/ksm/TestMultipleContainerReadWrite.java | 2 +-
 .../java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java | 2 +-
 .../org/apache/hadoop/ozone/scm/TestAllocateContainer.java   | 2 +-
 .../apache/hadoop/ozone/scm/TestXceiverClientManager.java| 2 +-
 .../hadoop/ozone/web/TestOzoneRestWithMiniCluster.java   | 2 +-
 .../java/org/apache/hadoop/ozone/web/client/TestKeys.java| 8 
 .../org/apache/hadoop/ozone/web/client/TestKeysRatis.java| 2 +-
 .../java/org/apache/hadoop/ozone/web/client/TestVolume.java  | 2 +-
 .../java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java   | 2 +-
 .../src/main/java/org/apache/hadoop/ozone/freon/Freon.java   | 6 +++---
 .../hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java| 2 +-
 .../hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java| 2 +-
 .../hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java   | 2 +-
 .../apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java   | 2 +-
 .../java/org/apache/hadoop/ozone/genesis/GenesisUtil.java| 2 +-
 25 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1272448/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
index 6a2286c..7c568f6 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
@@ -21,7 +21,7 @@ import com.google.common.base.Preconditions;
 import org.apache.ratis.shaded.io.netty.channel.Channel;
 import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext;
 import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1272448/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
index 7bf8f01..62b12e3 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
@@ -16,7 +16,7 @@
  */
 package org.apache.hadoop.hdds.client;
 
-import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1272448/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
--
diff --git 

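For most call sites in this patch the commons-lang 2.x to 3.7 move is a package rename with an unchanged helper API; a hedged before/after sketch:

// Before (commons-lang 2.x): import org.apache.commons.lang.StringUtils;
// After (commons-lang3 3.7):
import org.apache.commons.lang3.StringUtils;

class LangMigrationSketch {
  static boolean hasName(String name) {
    // Simple helpers such as isEmpty keep the same signature across versions.
    return !StringUtils.isEmpty(name);
  }
}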
[14/16] hadoop git commit: HDFS-13653. Make dfs.client.failover.random.order a per nameservice configuration. Contributed by Ekanth Sethuramalingam.

2018-06-11 Thread inigoiri
HDFS-13653. Make dfs.client.failover.random.order a per nameservice 
configuration. Contributed by Ekanth Sethuramalingam.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e5cfe6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e5cfe6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e5cfe6d

Branch: refs/heads/HADOOP-15461
Commit: 2e5cfe6df338c70965cfb0212a93617de3a6bd79
Parents: 180b3c9
Author: Inigo Goiri 
Authored: Mon Jun 11 11:16:52 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 11:20:29 2018 -0700

--
 .../ha/ConfiguredFailoverProxyProvider.java |  29 +-
 .../ha/TestConfiguredFailoverProxyProvider.java | 264 +++
 .../src/main/resources/hdfs-default.xml |  12 +
 .../hadoop/tools/TestHdfsConfigFields.java  |   1 +
 4 files changed, 303 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e5cfe6d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 58f4943..96722fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -94,9 +94,7 @@ public class ConfiguredFailoverProxyProvider<T> extends
       proxies.add(new AddressRpcProxyPair<T>(address));
     }
     // Randomize the list to prevent all clients pointing to the same one
-    boolean randomized = conf.getBoolean(
-        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
-        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    boolean randomized = getRandomOrder(conf, uri);
     if (randomized) {
       Collections.shuffle(proxies);
     }
@@ -111,6 +109,31 @@ public class ConfiguredFailoverProxyProvider extends
 }
   }
 
+  /**
+   * Check whether random order is configured for failover proxy provider
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode/nameservice
+   * @return random order configuration
+   */
+  private static boolean getRandomOrder(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    String configKeyWithHost = HdfsClientConfigKeys.Failover.RANDOM_ORDER
+        + "." + host;
+
+    if (conf.get(configKeyWithHost) != null) {
+      return conf.getBoolean(
+          configKeyWithHost,
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    }
+
+    return conf.getBoolean(
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  }
+
   @Override
   public Class<T> getInterface() {
     return xface;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e5cfe6d/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
new file mode 100644
index 000..d7a5db6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

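A hedged sketch of how a client opts in per nameservice after this change. The key literal matches HdfsClientConfigKeys.Failover.RANDOM_ORDER, and getRandomOrder above appends "." plus the URI host; the nameservice ID ns1 is an assumption:

import org.apache.hadoop.conf.Configuration;

class RandomOrderConfigSketch {
  static Configuration build() {
    Configuration conf = new Configuration();
    // Global default: keep deterministic proxy ordering.
    conf.setBoolean("dfs.client.failover.random.order", false);
    // Override for one nameservice: clients of ns1 shuffle their proxies.
    conf.setBoolean("dfs.client.failover.random.order.ns1", true);
    return conf;
  }
}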
[15/16] hadoop git commit: HADOOP-15465. Deprecate WinUtils#Symlinks by using native java code. Contributed by Giovanni Matteo Fumarola.

2018-06-11 Thread inigoiri
HADOOP-15465. Deprecate WinUtils#Symlinks by using native java code. 
Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4011162
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4011162
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4011162

Branch: refs/heads/HADOOP-15461
Commit: b4011162d39abb7c867e78e9a6feb4ad14ba4687
Parents: 2e5cfe6
Author: Inigo Goiri 
Authored: Thu Jun 7 17:02:01 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 13:13:29 2018 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 60 
 .../apache/hadoop/fs/RawLocalFileSystem.java|  2 -
 .../main/java/org/apache/hadoop/util/Shell.java |  9 ++-
 .../hadoop/yarn/server/MiniYARNCluster.java | 13 ++---
 4 files changed, 37 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4011162/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index df89598..61cb8d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -34,8 +34,10 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.nio.charset.Charset;
 import java.nio.file.AccessDeniedException;
+import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.FileSystems;
 import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;
@@ -1028,17 +1030,15 @@ public class FileUtil {
   }
 
   /**
-   * Create a soft link between a src and destination
-   * only on a local disk. HDFS does not support this.
-   * On Windows, when symlink creation fails due to security
-   * setting, we will log a warning. The return code in this
-   * case is 2.
+   * Create a soft link between a src and destination only on a local disk. On
+   * Windows, when symlink creation fails due to security setting, we will log a
+   * warning. The return code in this case is 2.
    *
    * @param target the target for symlink
    * @param linkname the symlink
    * @return 0 on success
    */
-  public static int symLink(String target, String linkname) throws IOException{
+  public static int symLink(String target, String linkname) throws IOException {
 
     if (target == null || linkname == null) {
       LOG.warn("Can not create a symLink with a target = " + target
@@ -1053,44 +1053,32 @@
     File linkFile = new File(
         Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString());
 
-    String[] cmd = Shell.getSymlinkCommand(
-        targetFile.toString(),
-        linkFile.toString());
-
-    ShellCommandExecutor shExec;
     try {
-      if (Shell.WINDOWS &&
-          linkFile.getParentFile() != null &&
-          !new Path(target).isAbsolute()) {
-        // Relative links on Windows must be resolvable at the time of
-        // creation. To ensure this we run the shell command in the directory
-        // of the link.
-        //
-        shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile());
-      } else {
-        shExec = new ShellCommandExecutor(cmd);
-      }
-      shExec.execute();
-    } catch (Shell.ExitCodeException ec) {
-      int returnVal = ec.getExitCode();
-      if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) {
-        LOG.warn("Fail to create symbolic links on Windows. "
-            + "The default security settings in Windows disallow non-elevated "
-            + "administrators and all non-administrators from creating symbolic links. "
-            + "This behavior can be changed in the Local Security Policy management console");
-      } else if (returnVal != 0) {
-        LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed "
-            + returnVal + " with: " + ec.getMessage());
-      }
-      return returnVal;
+      Files.createSymbolicLink(Paths.get(linkFile.toString()),
+          Paths.get(targetFile.toString()));
+    } catch (SecurityException e3) {
+      LOG.warn("Fail to create symbolic links on Windows. "
+          + "The default security settings in Windows disallow non-elevated "
+          + "administrators and all non-administrators from creating symbolic"
+          + " links. This behavior can be changed in the Local Security Policy"
+          + " management console");
+      return SYMLINK_NO_PRIVILEGE;
+
+    } catch 

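The replacement call is plain java.nio rather than a winutils/ln subprocess; a minimal sketch of the same operation outside Hadoop (both paths are invented):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

class SymlinkSketch {
  static void link() throws IOException {
    Path target = Paths.get("/tmp/data.txt");    // assumed target
    Path link = Paths.get("/tmp/data-link.txt"); // assumed link name
    // Failures now arrive as SecurityException/IOException, which the new
    // FileUtil code maps back onto the old numeric return-code contract.
    Files.createSymbolicLink(link, target);
  }
}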
[16/16] hadoop git commit: HADOOP-15522. Deprecate Shell#ReadLink by using native java code. Contributed by Giovanni Matteo Fumarola.

2018-06-11 Thread inigoiri
HADOOP-15522. Deprecate Shell#ReadLink by using native java code. Contributed 
by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae9d83ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae9d83ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae9d83ac

Branch: refs/heads/HADOOP-15461
Commit: ae9d83ac69c61d28a9da620f92c07388b31fb10c
Parents: b401116
Author: Inigo Goiri 
Authored: Mon Jun 11 13:14:34 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 13:14:34 2018 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 21 +++-
 .../main/java/org/apache/hadoop/util/Shell.java |  8 +++-
 2 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae9d83ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 61cb8d2..f3b5d58 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -196,22 +196,25 @@
    * a symlink.
    */
   public static String readLink(File f) {
-    /* NB: Use readSymbolicLink in java.nio.file.Path once available. Could
-     * use getCanonicalPath in File to get the target of the symlink but that
-     * does not indicate if the given path refers to a symlink.
-     */
 
     if (f == null) {
       LOG.warn("Can not read a null symLink");
       return "";
     }
 
-    try {
-      return Shell.execCommand(
-          Shell.getReadlinkCommand(f.toString())).trim();
-    } catch (IOException x) {
-      return "";
+    if (Files.isSymbolicLink(f.toPath())) {
+      java.nio.file.Path p = null;
+      try {
+        p = Files.readSymbolicLink(f.toPath());
+      } catch (Exception e) {
+        LOG.warn("Exception while reading the symbolic link "
+            + f.getAbsolutePath() + ". Exception= " + e.getMessage());
+        return "";
+      }
+      return p.toAbsolutePath().toString();
     }
+    LOG.warn("The file " + f.getAbsolutePath() + " is not a symbolic link.");
+    return "";
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae9d83ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 091299c..c0ba5d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -309,7 +309,13 @@ public abstract class Shell {
         : new String[] { "ln", "-s", target, link };
   }
 
-  /** Return a command to read the target of the a symbolic link. */
+  /**
+   * Return a command to read the target of a symbolic link.
+   *
+   * Deprecated and likely to be deleted in the near future. Please use
+   * FileUtil.symlink().
+   */
+  @Deprecated
   public static String[] getReadlinkCommand(String link) {
     return WINDOWS ?
         new String[] { getWinUtilsPath(), "readlink", link }

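readLink's new behavior can likewise be expressed in a few lines of java.nio; a hedged sketch mirroring the diff's logic (the class is invented):

import java.io.File;
import java.nio.file.Files;

class ReadLinkSketch {
  static String readLink(File f) {
    if (f == null || !Files.isSymbolicLink(f.toPath())) {
      return ""; // mirrors FileUtil's empty-string contract for non-links
    }
    try {
      return Files.readSymbolicLink(f.toPath()).toAbsolutePath().toString();
    } catch (Exception e) {
      return "";
    }
  }
}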




[10/16] hadoop git commit: HADOOP-15499. Performance severe drops when running RawErasureCoderBenchmark with NativeRSRawErasureCoder. Contributed by Sammi Chen.

2018-06-11 Thread inigoiri
HADOOP-15499. Performance severe drops when running RawErasureCoderBenchmark 
with NativeRSRawErasureCoder. Contributed by Sammi Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18201b88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18201b88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18201b88

Branch: refs/heads/HADOOP-15461
Commit: 18201b882a38ad875358c5d23c09b0ef903c2f91
Parents: ccfb816
Author: Sammi Chen 
Authored: Mon Jun 11 13:53:37 2018 +0800
Committer: Sammi Chen 
Committed: Mon Jun 11 13:53:37 2018 +0800

--
 .../rawcoder/AbstractNativeRawDecoder.java  | 51 
 .../rawcoder/AbstractNativeRawEncoder.java  | 49 +++
 .../rawcoder/NativeRSRawDecoder.java| 19 ++--
 .../rawcoder/NativeRSRawEncoder.java| 19 ++--
 .../rawcoder/NativeXORRawDecoder.java   | 19 ++--
 .../rawcoder/NativeXORRawEncoder.java   | 19 ++--
 .../rawcoder/RawErasureCoderBenchmark.java  |  6 +++
 7 files changed, 127 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18201b88/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
index e845747..cb71a80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * Abstract native raw decoder for all native coders to extend with.
@@ -34,36 +35,46 @@ abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
   public static Logger LOG =
       LoggerFactory.getLogger(AbstractNativeRawDecoder.class);
 
+  // Protect ISA-L coder data structure in native layer from being accessed and
+  // updated concurrently by the init, release and decode functions.
+  protected final ReentrantReadWriteLock decoderLock =
+      new ReentrantReadWriteLock();
+
   public AbstractNativeRawDecoder(ErasureCoderOptions coderOptions) {
     super(coderOptions);
   }
 
   @Override
-  protected synchronized void doDecode(ByteBufferDecodingState decodingState)
+  protected void doDecode(ByteBufferDecodingState decodingState)
       throws IOException {
-    if (nativeCoder == 0) {
-      throw new IOException(String.format("%s closed",
-          getClass().getSimpleName()));
-    }
-    int[] inputOffsets = new int[decodingState.inputs.length];
-    int[] outputOffsets = new int[decodingState.outputs.length];
+    decoderLock.readLock().lock();
+    try {
+      if (nativeCoder == 0) {
+        throw new IOException(String.format("%s closed",
+            getClass().getSimpleName()));
+      }
+      int[] inputOffsets = new int[decodingState.inputs.length];
+      int[] outputOffsets = new int[decodingState.outputs.length];
 
-    ByteBuffer buffer;
-    for (int i = 0; i < decodingState.inputs.length; ++i) {
-      buffer = decodingState.inputs[i];
-      if (buffer != null) {
-        inputOffsets[i] = buffer.position();
+      ByteBuffer buffer;
+      for (int i = 0; i < decodingState.inputs.length; ++i) {
+        buffer = decodingState.inputs[i];
+        if (buffer != null) {
+          inputOffsets[i] = buffer.position();
+        }
       }
-    }
 
-    for (int i = 0; i < decodingState.outputs.length; ++i) {
-      buffer = decodingState.outputs[i];
-      outputOffsets[i] = buffer.position();
-    }
+      for (int i = 0; i < decodingState.outputs.length; ++i) {
+        buffer = decodingState.outputs[i];
+        outputOffsets[i] = buffer.position();
+      }
 
-    performDecodeImpl(decodingState.inputs, inputOffsets,
-        decodingState.decodeLength, decodingState.erasedIndexes,
-        decodingState.outputs, outputOffsets);
+      performDecodeImpl(decodingState.inputs, inputOffsets,
+          decodingState.decodeLength, decodingState.erasedIndexes,
+          decodingState.outputs, outputOffsets);
+    } finally {
+      decoderLock.readLock().unlock();
+    }
   }
 
   protected abstract void performDecodeImpl(ByteBuffer[] inputs,


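The essence of the fix is replacing method-wide synchronized with a read/write lock: decode calls share a read lock and can run concurrently, while init and release take the write lock (per the comment in the diff). A hedged sketch of that discipline, with the release side, which the excerpt does not show, filled in as an assumption:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class NativeCoderLockingSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private long nativeCoder = 1; // 0 stands in for "native state released"

  void decode() {
    lock.readLock().lock(); // many decode calls may hold this at once
    try {
      if (nativeCoder == 0) {
        throw new IllegalStateException("coder closed");
      }
      // ... call into the native ISA-L layer ...
    } finally {
      lock.readLock().unlock();
    }
  }

  void release() {
    lock.writeLock().lock(); // waits out every in-flight decode
    try {
      nativeCoder = 0; // free the native data structures here
    } finally {
      lock.writeLock().unlock();
    }
  }
}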
hadoop git commit: Revert "Create Version File in Datanode. Contributed by Bharat Viswanadham."

2018-06-11 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 143dd560b -> 0e437f9b1


Revert "Create Version File in Datanode. Contributed by Bharat Viswanadham."

This reverts commit f26d3466d79125123cba00ab81481655d7bfe3c1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e437f9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e437f9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e437f9b

Branch: refs/heads/HDDS-48
Commit: 0e437f9b174f3b1eaf41b63ae707dd76379b8e8b
Parents: 143dd56
Author: Hanisha Koneru 
Authored: Mon Jun 11 12:15:39 2018 -0700
Committer: Hanisha Koneru 
Committed: Mon Jun 11 12:15:39 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   2 -
 .../org/apache/hadoop/ozone/common/Storage.java |   6 +-
 .../container/common/DataNodeLayoutVersion.java |  80 -
 .../common/helpers/DatanodeVersionFile.java | 172 ---
 .../states/datanode/RunningDatanodeState.java   |   3 +-
 .../states/endpoint/VersionEndpointTask.java|  71 +---
 .../container/ozoneimpl/OzoneContainer.java |   8 +-
 .../hadoop/ozone/protocol/VersionResponse.java  |   4 -
 .../ozone/container/common/ScmTestMock.java |  24 ---
 .../common/TestDatanodeLayOutVersion.java   |  38 
 .../common/TestDatanodeStateMachine.java|   3 +-
 .../common/helpers/TestDatanodeVersionFile.java | 120 -
 .../hadoop/hdds/scm/node/SCMNodeManager.java|   2 -
 .../ozone/container/common/TestEndPoint.java| 169 +-
 14 files changed, 14 insertions(+), 688 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e437f9b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index ce1a733..451a08f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -29,8 +29,6 @@ public final class OzoneConsts {
 
   public static final String STORAGE_DIR = "scm";
   public static final String SCM_ID = "scmUuid";
-  public static final String LAYOUTVERSION = "layOutVersion";
-  public static final String CTIME = "ctime";
 
   public static final String OZONE_SIMPLE_ROOT_USER = "root";
   public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e437f9b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index 35ddc71..fb30d92 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -45,10 +45,8 @@ import java.util.Properties;
 public abstract class Storage {
   private static final Logger LOG = LoggerFactory.getLogger(Storage.class);
 
-  public static final String STORAGE_DIR_CURRENT = "current";
-  public static final String STORAGE_FILE_VERSION = "VERSION";
-  public static final String STORAGE_DIR_HDDS = "hdds";
-
+  protected static final String STORAGE_DIR_CURRENT = "current";
+  protected static final String STORAGE_FILE_VERSION = "VERSION";
 
   private final NodeType nodeType;
   private final File root;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e437f9b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
deleted file mode 100644
index 2d58c39..000
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * 

hadoop git commit: HDFS-13653. Make dfs.client.failover.random.order a per nameservice configuration. Contributed by Ekanth Sethuramalingam.

2018-06-11 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 180b3c960 -> 2e5cfe6df


HDFS-13653. Make dfs.client.failover.random.order a per nameservice 
configuration. Contributed by Ekanth Sethuramalingam.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e5cfe6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e5cfe6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e5cfe6d

Branch: refs/heads/trunk
Commit: 2e5cfe6df338c70965cfb0212a93617de3a6bd79
Parents: 180b3c9
Author: Inigo Goiri 
Authored: Mon Jun 11 11:16:52 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 11:20:29 2018 -0700

--
 .../ha/ConfiguredFailoverProxyProvider.java |  29 +-
 .../ha/TestConfiguredFailoverProxyProvider.java | 264 +++
 .../src/main/resources/hdfs-default.xml |  12 +
 .../hadoop/tools/TestHdfsConfigFields.java  |   1 +
 4 files changed, 303 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e5cfe6d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 58f4943..96722fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -94,9 +94,7 @@ public class ConfiguredFailoverProxyProvider extends
 proxies.add(new AddressRpcProxyPair(address));
   }
   // Randomize the list to prevent all clients pointing to the same one
-  boolean randomized = conf.getBoolean(
-  HdfsClientConfigKeys.Failover.RANDOM_ORDER,
-  HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  boolean randomized = getRandomOrder(conf, uri);
   if (randomized) {
 Collections.shuffle(proxies);
   }
@@ -111,6 +109,31 @@ public class ConfiguredFailoverProxyProvider extends
 }
   }
 
+  /**
+   * Check whether random order is configured for failover proxy provider
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode/nameservice
+   * @return random order configuration
+   */
+  private static boolean getRandomOrder(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    String configKeyWithHost = HdfsClientConfigKeys.Failover.RANDOM_ORDER
+        + "." + host;
+
+    if (conf.get(configKeyWithHost) != null) {
+      return conf.getBoolean(
+          configKeyWithHost,
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    }
+
+    return conf.getBoolean(
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  }
+
   @Override
   public Class<T> getInterface() {
     return xface;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e5cfe6d/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
new file mode 100644
index 000..d7a5db6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

hadoop git commit: YARN-8321. AllocationFileLoaderService.getAllocationFile() should be declared as VisibleForTest. (Szilard Nemeth via Haibo Chen)

2018-06-11 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 676dc -> 180b3c960


YARN-8321. AllocationFileLoaderService.getAllocationFile() should be declared 
as VisibleForTest. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/180b3c96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/180b3c96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/180b3c96

Branch: refs/heads/trunk
Commit: 180b3c960bb693a68431c677d8c8b18821fb4361
Parents: 676dcff
Author: Haibo Chen 
Authored: Mon Jun 11 11:18:44 2018 -0700
Committer: Haibo Chen 
Committed: Mon Jun 11 11:19:33 2018 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/180b3c96/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 32cb236..56cc887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -181,7 +181,8 @@ public class AllocationFileLoaderService extends AbstractService {
    * path is relative, it is searched for in the
    * classpath, but loaded like a regular File.
    */
-  public Path getAllocationFile(Configuration conf)
+  @VisibleForTesting
+  Path getAllocationFile(Configuration conf)
       throws UnsupportedFileSystemException {
     String allocFilePath = conf.get(FairSchedulerConfiguration.ALLOCATION_FILE,
         FairSchedulerConfiguration.DEFAULT_ALLOCATION_FILE);





[2/2] hadoop git commit: HDFS-13653. Make dfs.client.failover.random.order a per nameservice configuration. Contributed by Ekanth Sethuramalingam.

2018-06-11 Thread inigoiri
HDFS-13653. Make dfs.client.failover.random.order a per nameservice 
configuration. Contributed by Ekanth Sethuramalingam.

(cherry picked from commit 784dbbd2d1fc0c361fee851d3f9b3aa50be5481d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eea6d28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eea6d28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eea6d28

Branch: refs/heads/branch-3.0
Commit: 6eea6d28de32d453beabd04a6e5be59c989f4100
Parents: b874110
Author: Inigo Goiri 
Authored: Mon Jun 11 11:16:52 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 11:18:04 2018 -0700

--
 .../ha/ConfiguredFailoverProxyProvider.java |  29 +-
 .../ha/TestConfiguredFailoverProxyProvider.java | 264 +++
 .../src/main/resources/hdfs-default.xml |  12 +
 .../hadoop/tools/TestHdfsConfigFields.java  |   1 +
 4 files changed, 303 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eea6d28/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 58f4943..96722fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -94,9 +94,7 @@ public class ConfiguredFailoverProxyProvider extends
 proxies.add(new AddressRpcProxyPair(address));
   }
   // Randomize the list to prevent all clients pointing to the same one
-  boolean randomized = conf.getBoolean(
-  HdfsClientConfigKeys.Failover.RANDOM_ORDER,
-  HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  boolean randomized = getRandomOrder(conf, uri);
   if (randomized) {
 Collections.shuffle(proxies);
   }
@@ -111,6 +109,31 @@ public class ConfiguredFailoverProxyProvider extends
 }
   }
 
+  /**
+   * Check whether random order is configured for failover proxy provider
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode/nameservice
+   * @return random order configuration
+   */
+  private static boolean getRandomOrder(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    String configKeyWithHost = HdfsClientConfigKeys.Failover.RANDOM_ORDER
+        + "." + host;
+
+    if (conf.get(configKeyWithHost) != null) {
+      return conf.getBoolean(
+          configKeyWithHost,
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+    }
+
+    return conf.getBoolean(
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  }
+
   @Override
   public Class<T> getInterface() {
     return xface;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eea6d28/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
new file mode 100644
index 000..d7a5db6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

[1/2] hadoop git commit: HDFS-13653. Make dfs.client.failover.random.order a per nameservice configuration. Contributed by Ekanth Sethuramalingam.

2018-06-11 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b87411027 -> 6eea6d28d
  refs/heads/branch-3.1 e3c96354a -> f0150f024


HDFS-13653. Make dfs.client.failover.random.order a per nameservice 
configuration. Contributed by Ekanth Sethuramalingam.

(cherry picked from commit 784dbbd2d1fc0c361fee851d3f9b3aa50be5481d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0150f02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0150f02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0150f02

Branch: refs/heads/branch-3.1
Commit: f0150f024f7e18e782666cede0b7e3c5881badd2
Parents: e3c9635
Author: Inigo Goiri 
Authored: Mon Jun 11 11:16:52 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jun 11 11:17:37 2018 -0700

--
 .../ha/ConfiguredFailoverProxyProvider.java |  29 +-
 .../ha/TestConfiguredFailoverProxyProvider.java | 264 +++
 .../src/main/resources/hdfs-default.xml |  12 +
 .../hadoop/tools/TestHdfsConfigFields.java  |   1 +
 4 files changed, 303 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0150f02/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 58f4943..96722fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -94,9 +94,7 @@ public class ConfiguredFailoverProxyProvider extends
 proxies.add(new AddressRpcProxyPair(address));
   }
   // Randomize the list to prevent all clients pointing to the same one
-  boolean randomized = conf.getBoolean(
-  HdfsClientConfigKeys.Failover.RANDOM_ORDER,
-  HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  boolean randomized = getRandomOrder(conf, uri);
   if (randomized) {
 Collections.shuffle(proxies);
   }
@@ -111,6 +109,31 @@ public class ConfiguredFailoverProxyProvider extends
 }
   }
 
+  /**
+   * Check whether random order is configured for failover proxy provider
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode/nameservice
+   * @return random order configuration
+   */
+  private static boolean getRandomOrder(
+  Configuration conf, URI nameNodeUri) {
+String host = nameNodeUri.getHost();
+String configKeyWithHost = HdfsClientConfigKeys.Failover.RANDOM_ORDER
++ "." + host;
+
+if (conf.get(configKeyWithHost) != null) {
+  return conf.getBoolean(
+  configKeyWithHost,
+  HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+}
+
+return conf.getBoolean(
+HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+  }
+
   @Override
  public Class<T> getInterface() {
 return xface;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0150f02/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
new file mode 100644
index 000..d7a5db6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConfiguredFailoverProxyProvider.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
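
As context for the getRandomOrder() logic in the hunk above, here is a minimal, hypothetical sketch of how the new per-nameservice key interacts with the global one; the nameservice name "ns1" and the standalone class are assumptions for illustration, not part of the patch:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;

    public class RandomOrderSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Global default: keep the configured namenode order.
        conf.setBoolean("dfs.client.failover.random.order", false);
        // Hypothetical nameservice "ns1": shuffle proxies for it only.
        conf.setBoolean("dfs.client.failover.random.order.ns1", true);

        URI nameNodeUri = URI.create("hdfs://ns1");
        String keyWithHost =
            "dfs.client.failover.random.order." + nameNodeUri.getHost();
        // Mirrors getRandomOrder(): the host-specific key wins when set.
        boolean randomized = conf.get(keyWithHost) != null
            ? conf.getBoolean(keyWithHost, false)
            : conf.getBoolean("dfs.client.failover.random.order", false);
        System.out.println("randomized for ns1? " + randomized); // true
      }
    }

Other nameservices in the same Configuration keep the global default, which is the point of making the setting per nameservice.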

hadoop git commit: YARN-8322. Change log level when there is an IOException when the allocation file is loaded. (Szilard Nemeth via Haibo Chen)

2018-06-11 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk c190ac2be -> 676dc


YARN-8322. Change log level when there is an IOException when the allocation 
file is loaded. (Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/676dcfff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/676dcfff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/676dcfff

Branch: refs/heads/trunk
Commit: 676dc575fdf1c4b49aeae4e000bd60ca0a83
Parents: c190ac2
Author: Haibo Chen 
Authored: Mon Jun 11 11:16:21 2018 -0700
Committer: Haibo Chen 
Committed: Mon Jun 11 11:16:21 2018 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/676dcfff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index e541ab7..32cb236 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -109,7 +109,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   @Override
   public void serviceInit(Configuration conf) throws Exception {
 this.allocFile = getAllocationFile(conf);
-if(this.allocFile != null) {
+if (this.allocFile != null) {
   this.fs = allocFile.getFileSystem(conf);
   reloadThread = new Thread(() -> {
 while (running) {
@@ -138,7 +138,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   lastReloadAttemptFailed = true;
 }
   } catch (IOException e) {
-LOG.info("Exception while loading allocation file: " + e);
+LOG.error("Exception while loading allocation file: " + e);
   }
   try {
 Thread.sleep(reloadIntervalMs);





hadoop git commit: YARN-8323. FairScheduler.allocConf should be declared as volatile. (Szilard Nemeth via Haibo Chen)

2018-06-11 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 18201b882 -> c190ac2be


YARN-8323. FairScheduler.allocConf should be declared as volatile. (Szilard 
Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c190ac2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c190ac2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c190ac2b

Branch: refs/heads/trunk
Commit: c190ac2be88e574b3322cdc73a7c0af0cef708b2
Parents: 18201b8
Author: Haibo Chen 
Authored: Mon Jun 11 11:12:44 2018 -0700
Committer: Haibo Chen 
Committed: Mon Jun 11 11:12:44 2018 -0700

--
 .../yarn/server/resourcemanager/scheduler/fair/FairScheduler.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c190ac2b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 557e684..eb9f6af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -199,7 +199,7 @@ public class FairScheduler extends
 
   private AllocationFileLoaderService allocsLoader;
   @VisibleForTesting
-  AllocationConfiguration allocConf;
+  volatile AllocationConfiguration allocConf;
 
   // Container size threshold for making a reservation.
   @VisibleForTesting
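
The allocConf reference is swapped by AllocationFileLoaderService's reload thread while scheduler threads read it, so without volatile a reader may never observe a newly loaded allocation file. A minimal, self-contained sketch of that visibility guarantee (names are hypothetical, not FairScheduler code):

    public class VolatileReloadSketch {
      private static class AllocConf {
        final long version;
        AllocConf(long version) { this.version = version; }
      }

      // volatile publishes each new reference to all reader threads; without
      // it, the reader loop below could legally spin on a stale reference.
      private volatile AllocConf allocConf = new AllocConf(0);

      public static void main(String[] args) throws InterruptedException {
        VolatileReloadSketch s = new VolatileReloadSketch();
        Thread reloader = new Thread(() -> {
          for (long v = 1; v <= 1000; v++) {
            s.allocConf = new AllocConf(v); // publish a freshly parsed config
          }
        });
        reloader.start();
        long seen = 0;
        while (seen < 1000) {
          seen = Math.max(seen, s.allocConf.version); // reader side
        }
        reloader.join();
        System.out.println("reader observed version " + seen);
      }
    }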





[03/16] hadoop git commit: HDDS-127. Add CloseContainerEventHandler in SCM. Contributed by Shashikant Banerjee.

2018-06-11 Thread sunchao
HDDS-127. Add CloseContainerEventHandler in SCM.
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78761e87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78761e87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78761e87

Branch: refs/heads/HDFS-12943
Commit: 78761e87a7f3012ef2d96e294d55b323b76b7c42
Parents: ba303b1
Author: Anu Engineer 
Authored: Thu Jun 7 14:35:22 2018 -0700
Committer: Anu Engineer 
Committed: Thu Jun 7 14:35:22 2018 -0700

--
 .../container/CloseContainerEventHandler.java   |  83 +
 .../hdds/scm/container/ContainerMapping.java|   5 +
 .../scm/container/ContainerStateManager.java|   9 +
 .../hadoop/hdds/scm/container/Mapping.java  |   6 +
 .../TestCloseContainerEventHandler.java | 177 +++
 5 files changed, 280 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78761e87/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
new file mode 100644
index 000..bc95b55
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
+import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * In case of a node failure, volume failure, volume out of space, node
+ * out of space etc, CLOSE_CONTAINER_EVENT will be triggered.
+ * CloseContainerEventHandler is the handler for CLOSE_CONTAINER_EVENT.
+ * When a close container event is fired, a close command for the container
+ * should be sent to all the datanodes in the pipeline and 
containerStateManager
+ * needs to update the container state to Closing.
+ */
+public class CloseContainerEventHandler implements EventHandler<ContainerID> {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CloseContainerEventHandler.class);
+
+  public static final TypedEvent<ContainerID> CLOSE_CONTAINER_EVENT =
+      new TypedEvent<>(ContainerID.class);
+
+  private final Mapping containerManager;
+
+  public CloseContainerEventHandler(Mapping containerManager) {
+this.containerManager = containerManager;
+  }
+
+  @Override
+  public void onMessage(ContainerID containerID, EventPublisher publisher) {
+
+LOG.info("Close container Event triggered for container : {}",
+containerID.getId());
+ContainerStateManager stateManager = containerManager.getStateManager();
+ContainerInfo info = stateManager.getContainer(containerID);
+if (info == null) {
+  LOG.info("Container with id : {} does not exist", containerID.getId());
+  return;
+}
+if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
+  for (DatanodeDetails datanode : info.getPipeline().getMachines()) {
+
containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(),
+new CloseContainerCommand(containerID.getId()));
+  }
+  try {
+// Finalize event will make sure the state of the container transitions
+// from OPEN to CLOSING in containerStateManager.
+      stateManager
+          .updateContainerState(info, HddsProtos.LifeCycleEvent.FINALIZE);
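
A hedged sketch of how this handler might be wired up; it assumes the hdds EventQueue dispatcher and a Mapping implementation named containerMapping, neither of which appears in this (truncated) diff:

    import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.Mapping;
    import org.apache.hadoop.hdds.server.events.EventQueue;

    public class CloseContainerWiringSketch {
      /** containerMapping is a hypothetical Mapping implementation. */
      public static void fireClose(Mapping containerMapping, long containerId) {
        EventQueue eventQueue = new EventQueue();
        // Route CLOSE_CONTAINER_EVENT payloads to the new handler.
        eventQueue.addHandler(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
            new CloseContainerEventHandler(containerMapping));
        // onMessage() then sends close commands to the pipeline datanodes
        // and finalizes the container state from OPEN toward CLOSING.
        eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
            new ContainerID(containerId));
      }
    }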

[01/16] hadoop git commit: HADOOP-15516. Add test cases to cover FileUtil#readLink. Contributed by Giovanni Matteo Fumarola.

2018-06-11 Thread sunchao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 990a22181 -> 9756c2cbe


HADOOP-15516. Add test cases to cover FileUtil#readLink. Contributed by 
Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12be8bad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12be8bad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12be8bad

Branch: refs/heads/HDFS-12943
Commit: 12be8bad7debd67c9ea72b979a39c8cf42c5f37d
Parents: 7969cc4
Author: Inigo Goiri 
Authored: Thu Jun 7 13:34:52 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Jun 7 13:34:52 2018 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java |  6 +++
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 52 
 2 files changed, 58 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12be8bad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index ed10f1c..df89598 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -198,6 +198,12 @@ public class FileUtil {
  * use getCanonicalPath in File to get the target of the symlink but that
  * does not indicate if the given path refers to a symlink.
  */
+
+if (f == null) {
+  LOG.warn("Can not read a null symLink");
+  return "";
+}
+
 try {
   return Shell.execCommand(
   Shell.getReadlinkCommand(f.toString())).trim();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12be8bad/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 01fa563..f557103 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -1441,4 +1441,56 @@ public class TestFileUtil {
 }
   }
 
+  /**
+   * This test validates the correctness of {@link FileUtil#readLink(File)} in
+   * case of null pointer inputs.
+   */
+  @Test
+  public void testReadSymlinkWithNullInput() {
+String result = FileUtil.readLink(null);
+Assert.assertEquals("", result);
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#readLink(File)}.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testReadSymlink() throws IOException {
+Assert.assertFalse(del.exists());
+del.mkdirs();
+
+File file = new File(del, FILE);
+File link = new File(del, "_link");
+
+// Create a symbolic link
+FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
+
+String result = FileUtil.readLink(link);
+Assert.assertEquals(file.getAbsolutePath(), result);
+
+file.delete();
+link.delete();
+  }
+
+  /**
+   * This test validates the correctness of {@link FileUtil#readLink(File)} 
when
+   * it gets a file in input.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testReadSymlinkWithAFileAsInput() throws IOException {
+Assert.assertFalse(del.exists());
+del.mkdirs();
+
+File file = new File(del, FILE);
+
+String result = FileUtil.readLink(file);
+Assert.assertEquals("", result);
+
+file.delete();
+  }
+
 }





[07/16] hadoop git commit: YARN-8359. Exclude containermanager.linux test classes on Windows. Contributed by Jason Lowe.

2018-06-11 Thread sunchao
YARN-8359. Exclude containermanager.linux test classes on Windows. Contributed 
by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b88fe25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b88fe25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b88fe25

Branch: refs/heads/HDFS-12943
Commit: 3b88fe25baf130cd7a77590f9ded5b0bf028ef75
Parents: 351cf87
Author: Inigo Goiri 
Authored: Thu Jun 7 17:09:31 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Jun 7 17:13:03 2018 -0700

--
 .../hadoop-yarn-server-nodemanager/pom.xml  | 21 
 1 file changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b88fe25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 5146820..26a5220 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -294,6 +294,27 @@
 
   
 
+    <profile>
+      <id>native-win</id>
+      <activation>
+        <os>
+          <family>Windows</family>
+        </os>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-surefire-plugin</artifactId>
+            <configuration>
+              <excludes>
+                <exclude>org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.**</exclude>
+              </excludes>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   
 
   





[05/16] hadoop git commit: YARN-6677. Preempt opportunistic containers when root container cgroup goes over memory limit. Contributed by Haibo Chen.

2018-06-11 Thread sunchao
YARN-6677. Preempt opportunistic containers when root container cgroup goes 
over memory limit. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5eca1a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5eca1a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5eca1a6

Branch: refs/heads/HDFS-12943
Commit: d5eca1a6a0e3939eead6711805b7a61c364d254b
Parents: 67fc70e
Author: Miklos Szegedi 
Authored: Thu Jun 7 14:58:56 2018 -0700
Committer: Miklos Szegedi 
Committed: Thu Jun 7 16:38:23 2018 -0700

--
 .../containermanager/container/Container.java   |   8 +
 .../container/ContainerImpl.java|   5 +
 .../linux/resources/DefaultOOMHandler.java  | 249 +++--
 .../linux/resources/TestDefaultOOMHandler.java  | 922 ---
 .../nodemanager/webapp/MockContainer.java   |   5 +
 5 files changed, 989 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5eca1a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
index 86f2554..5d48d84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
@@ -37,8 +37,16 @@ public interface Container extends 
EventHandler<ContainerEvent> {
 
   ContainerId getContainerId();
 
+  /**
+   * The timestamp when the container start request is received.
+   */
   long getContainerStartTime();
 
+  /**
+   * The timestamp when the container is allowed to be launched.
+   */
+  long getContainerLaunchTime();
+
   Resource getResource();
 
   ContainerTokenIdentifier getContainerTokenIdentifier();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5eca1a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 5527ac4..95ab374 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -883,6 +883,11 @@ public class ContainerImpl implements Container {
   }
 
   @Override
+  public long getContainerLaunchTime() {
+return this.containerLaunchStartTime;
+  }
+
+  @Override
   public Resource getResource() {
 return Resources.clone(
 this.containerTokenIdentifier.getResource());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5eca1a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
index c690225..202e7d0 100644
--- 

[04/16] hadoop git commit: YARN-8400. Fix typos in YARN Federation documentation page. Contributed by Giovanni Matteo Fumarola.

2018-06-11 Thread sunchao
YARN-8400. Fix typos in YARN Federation documentation page. Contributed by 
Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67fc70e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67fc70e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67fc70e0

Branch: refs/heads/HDFS-12943
Commit: 67fc70e09f941e9b43b022d9f42a9486ad759e6e
Parents: 78761e8
Author: Inigo Goiri 
Authored: Thu Jun 7 16:10:15 2018 -0700
Committer: Inigo Goiri 
Committed: Thu Jun 7 16:10:15 2018 -0700

--
 .../hadoop-yarn-site/src/site/markdown/Federation.md  | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67fc70e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
index 087a5b0..953f826 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
@@ -42,7 +42,7 @@ The applications running in this federated environment see a 
unified large YARN
 ![YARN Federation Architecture | 
width=800](./images/federation_architecture.png)
 
 ###YARN Sub-cluster
-A sub-cluster is a YARN cluster with up to few thousands nodes. The exact size 
of the sub-cluster will be determined considering ease of 
deployment/maintenance, alignment
+A sub-cluster is a YARN cluster with up to a few thousand nodes. The exact 
size of the sub-cluster will be determined considering ease of 
deployment/maintenance, alignment
 with network or availability zones and general best practices.
 
 The sub-cluster YARN RM will run with work-preserving high-availability 
turned-on, i.e., we should be able to tolerate YARN RM, NM failures with 
minimal disruption.
@@ -80,7 +80,7 @@ to minimize overhead on the scheduling infrastructure (more 
in section on scalab
 
 ###Global Policy Generator
 Global Policy Generator overlooks the entire federation and ensures that the 
system is configured and tuned properly all the time.
-A key design point is that the cluster availability does not depends on an 
always-on GPG. The GPG operates continuously but out-of-band from all cluster 
operations,
+A key design point is that the cluster availability does not depend on an 
always-on GPG. The GPG operates continuously but out-of-band from all cluster 
operations,
 and provide us with a unique vantage point, that allows to enforce global 
invariants, affect load balancing, trigger draining of sub-clusters that will 
undergo maintenance, etc.
 More precisely the GPG will update user capacity allocation-to-subcluster 
mappings, and more rarely change the policies that run in Routers, AMRMProxy 
(and possible RMs).
 
@@ -111,7 +111,7 @@ on the home sub-cluster. Only in certain cases it should 
need to ask for resourc
 The federation Policy Store is a logically separate store (while it might be 
backed
 by the same physical component), which contains information about how 
applications and
 resource requests are routed to different sub-clusters. The current 
implementation provides
-several policies, ranging from random/hashing/roundrobin/priority to more 
sophisticated
+several policies, ranging from random/hashing/round-robin/priority to more 
sophisticated
 ones which account for sub-cluster load, and request locality needs.
 
 
@@ -218,7 +218,7 @@ SQL-Server scripts are located in 
**sbin/FederationStateStore/SQLServer/**.
 |`yarn.federation.policy-manager` | 
`org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager`
 | The choice of policy manager determines how Applications and 
ResourceRequests are routed through the system. |
 |`yarn.federation.policy-manager-params` | `` | The payload that 
configures the policy. In our example a set of weights for router and amrmproxy 
policies. This is typically generated by serializing a policymanager that has 
been configured programmatically, or by populating the state-store with the 
.json serialized form of it. |
 |`yarn.federation.subcluster-resolver.class` | 
`org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl`
 | The class used to resolve which subcluster a node belongs to, and which 
subcluster(s) a rack belongs to. |
-|`yarn.federation.machine-list` | `` | Path of 
machine-list file used by `SubClusterResolver`. Each line of the file is a node 
with sub-cluster and rack information. Below is the example: <br/> node1, 
subcluster1, rack1 <br/> node2, subcluster2, rack1 <br/> node3, subcluster3, rack2 |

[15/16] hadoop git commit: HADOOP-15499. Performance severe drops when running RawErasureCoderBenchmark with NativeRSRawErasureCoder. Contributed by Sammi Chen.

2018-06-11 Thread sunchao
HADOOP-15499. Performance severe drops when running RawErasureCoderBenchmark 
with NativeRSRawErasureCoder. Contributed by Sammi Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18201b88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18201b88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18201b88

Branch: refs/heads/HDFS-12943
Commit: 18201b882a38ad875358c5d23c09b0ef903c2f91
Parents: ccfb816
Author: Sammi Chen 
Authored: Mon Jun 11 13:53:37 2018 +0800
Committer: Sammi Chen 
Committed: Mon Jun 11 13:53:37 2018 +0800

--
 .../rawcoder/AbstractNativeRawDecoder.java  | 51 
 .../rawcoder/AbstractNativeRawEncoder.java  | 49 +++
 .../rawcoder/NativeRSRawDecoder.java| 19 ++--
 .../rawcoder/NativeRSRawEncoder.java| 19 ++--
 .../rawcoder/NativeXORRawDecoder.java   | 19 ++--
 .../rawcoder/NativeXORRawEncoder.java   | 19 ++--
 .../rawcoder/RawErasureCoderBenchmark.java  |  6 +++
 7 files changed, 127 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18201b88/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
index e845747..cb71a80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * Abstract native raw decoder for all native coders to extend with.
@@ -34,36 +35,46 @@ abstract class AbstractNativeRawDecoder extends 
RawErasureDecoder {
   public static Logger LOG =
   LoggerFactory.getLogger(AbstractNativeRawDecoder.class);
 
+  // Protect ISA-L coder data structure in native layer from being accessed and
+  // updated concurrently by the init, release and decode functions.
+  protected final ReentrantReadWriteLock decoderLock =
+  new ReentrantReadWriteLock();
+
   public AbstractNativeRawDecoder(ErasureCoderOptions coderOptions) {
 super(coderOptions);
   }
 
   @Override
-  protected synchronized void doDecode(ByteBufferDecodingState decodingState)
+  protected void doDecode(ByteBufferDecodingState decodingState)
   throws IOException {
-if (nativeCoder == 0) {
-  throw new IOException(String.format("%s closed",
-  getClass().getSimpleName()));
-}
-int[] inputOffsets = new int[decodingState.inputs.length];
-int[] outputOffsets = new int[decodingState.outputs.length];
+decoderLock.readLock().lock();
+try {
+  if (nativeCoder == 0) {
+throw new IOException(String.format("%s closed",
+getClass().getSimpleName()));
+  }
+  int[] inputOffsets = new int[decodingState.inputs.length];
+  int[] outputOffsets = new int[decodingState.outputs.length];
 
-ByteBuffer buffer;
-for (int i = 0; i < decodingState.inputs.length; ++i) {
-  buffer = decodingState.inputs[i];
-  if (buffer != null) {
-inputOffsets[i] = buffer.position();
+  ByteBuffer buffer;
+  for (int i = 0; i < decodingState.inputs.length; ++i) {
+buffer = decodingState.inputs[i];
+if (buffer != null) {
+  inputOffsets[i] = buffer.position();
+}
   }
-}
 
-for (int i = 0; i < decodingState.outputs.length; ++i) {
-  buffer = decodingState.outputs[i];
-  outputOffsets[i] = buffer.position();
-}
+  for (int i = 0; i < decodingState.outputs.length; ++i) {
+buffer = decodingState.outputs[i];
+outputOffsets[i] = buffer.position();
+  }
 
-performDecodeImpl(decodingState.inputs, inputOffsets,
-decodingState.decodeLength, decodingState.erasedIndexes,
-decodingState.outputs, outputOffsets);
+  performDecodeImpl(decodingState.inputs, inputOffsets,
+  decodingState.decodeLength, decodingState.erasedIndexes,
+  decodingState.outputs, outputOffsets);
+} finally {
+  decoderLock.readLock().unlock();
+}
   }
 
   protected abstract void performDecodeImpl(ByteBuffer[] inputs,
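
In essence, the patch replaces method-level synchronized with a read-write lock so concurrent decode calls can proceed in parallel while init and release stay exclusive. A condensed, self-contained sketch of that pattern with hypothetical names (not the Hadoop class itself):

    import java.io.IOException;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class NativeCoderSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private long nativeCoder = 1; // stand-in for the native handle; 0 = released

      void decode() throws IOException {
        lock.readLock().lock();          // many decodes may run in parallel
        try {
          if (nativeCoder == 0) {
            throw new IOException("coder closed");
          }
          // ... call into the native decode implementation here ...
        } finally {
          lock.readLock().unlock();
        }
      }

      void release() {
        lock.writeLock().lock();         // exclusive: waits for in-flight decodes
        try {
          nativeCoder = 0;               // safe to free the ISA-L structures now
        } finally {
          lock.writeLock().unlock();
        }
      }
    }

Read locks are cheap and shared, which is what restores the lost throughput while still keeping release from freeing native memory under an active decode.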


[11/16] hadoop git commit: HDFS-13664. Refactor ConfiguredFailoverProxyProvider to make inheritance easier. Contributed by Chao Sun.

2018-06-11 Thread sunchao
HDFS-13664. Refactor ConfiguredFailoverProxyProvider to make inheritance 
easier. Contributed by Chao Sun.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fba1c42a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fba1c42a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fba1c42a

Branch: refs/heads/HDFS-12943
Commit: fba1c42adc1c8ae57951e1865ec2ab05c8707bdf
Parents: cf41083
Author: Chao Sun 
Authored: Fri Jun 8 16:36:42 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Jun 8 16:36:42 2018 -0700

--
 .../namenode/ha/ConfiguredFailoverProxyProvider.java  | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fba1c42a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index e9c8791..58f4943 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -52,11 +52,11 @@ public class ConfiguredFailoverProxyProvider extends
   protected final Configuration conf;
  protected final List<AddressRpcProxyPair<T>> proxies =
      new ArrayList<AddressRpcProxyPair<T>>();
-  private final UserGroupInformation ugi;
+  protected final UserGroupInformation ugi;
  protected final Class<T> xface;

  private int currentProxyIndex = 0;
-  private final HAProxyFactory<T> factory;
+  protected final HAProxyFactory<T> factory;

  public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
      Class<T> xface, HAProxyFactory<T> factory) {
@@ -122,6 +122,10 @@ public class ConfiguredFailoverProxyProvider extends
   @Override
  public synchronized ProxyInfo<T> getProxy() {
 AddressRpcProxyPair current = proxies.get(currentProxyIndex);
+return getProxy(current);
+  }
+
+  protected ProxyInfo<T> getProxy(AddressRpcProxyPair<T> current) {
 if (current.namenode == null) {
   try {
 current.namenode = factory.createProxy(conf,
@@ -147,7 +151,7 @@ public class ConfiguredFailoverProxyProvider extends
* A little pair object to store the address and connected RPC proxy object 
to
* an NN. Note that {@link AddressRpcProxyPair#namenode} may be null.
*/
-  private static class AddressRpcProxyPair<T> {
+  protected static class AddressRpcProxyPair<T> {
 public final InetSocketAddress address;
 public T namenode;
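
Since the stated goal is easier inheritance, here is a hypothetical subclass sketch of what the widened protected members enable; the class name and the logging behavior are invented for illustration, and it assumes placement in the same hdfs.server.namenode.ha package so HAProxyFactory and AddressRpcProxyPair resolve directly:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo;

    public class LoggingFailoverProxyProvider<T>
        extends ConfiguredFailoverProxyProvider<T> {

      public LoggingFailoverProxyProvider(Configuration conf, URI uri,
          Class<T> xface, HAProxyFactory<T> factory) {
        super(conf, uri, xface, factory);
      }

      @Override
      protected ProxyInfo<T> getProxy(AddressRpcProxyPair<T> current) {
        // ugi and factory are now visible to subclasses as well.
        System.out.println("Creating proxy to " + current.address
            + " as user " + ugi.getShortUserName());
        return super.getProxy(current);
      }
    }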
 





[09/16] hadoop git commit: HDDS-157. Upgrade common-langs version to 3.7 in HDDS and Ozone. Contributed by Takanobu Asanuma.

2018-06-11 Thread sunchao
HDDS-157. Upgrade common-langs version to 3.7 in HDDS and Ozone.
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1272448
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1272448
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1272448

Branch: refs/heads/HDFS-12943
Commit: a1272448bfa2f1a159d948b8635558e053b7be78
Parents: c42dcc7
Author: Anu Engineer 
Authored: Fri Jun 8 10:27:01 2018 -0700
Committer: Anu Engineer 
Committed: Fri Jun 8 10:27:01 2018 -0700

--
 .../org/apache/hadoop/hdds/scm/XceiverClientHandler.java | 2 +-
 .../src/main/java/org/apache/hadoop/hdds/client/BlockID.java | 2 +-
 .../common/states/endpoint/RegisterEndpointTask.java | 2 +-
 .../apache/hadoop/ozone/client/rest/response/KeyInfo.java| 4 ++--
 .../java/org/apache/hadoop/ozone/web/response/KeyInfo.java   | 4 ++--
 .../hadoop/ozone/TestStorageContainerManagerHelper.java  | 2 +-
 .../apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java   | 2 +-
 .../apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java | 2 +-
 .../org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java | 2 +-
 .../org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java  | 2 +-
 .../hadoop/ozone/ksm/TestMultipleContainerReadWrite.java | 2 +-
 .../java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java | 2 +-
 .../org/apache/hadoop/ozone/scm/TestAllocateContainer.java   | 2 +-
 .../apache/hadoop/ozone/scm/TestXceiverClientManager.java| 2 +-
 .../hadoop/ozone/web/TestOzoneRestWithMiniCluster.java   | 2 +-
 .../java/org/apache/hadoop/ozone/web/client/TestKeys.java| 8 
 .../org/apache/hadoop/ozone/web/client/TestKeysRatis.java| 2 +-
 .../java/org/apache/hadoop/ozone/web/client/TestVolume.java  | 2 +-
 .../java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java   | 2 +-
 .../src/main/java/org/apache/hadoop/ozone/freon/Freon.java   | 6 +++---
 .../hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java| 2 +-
 .../hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java| 2 +-
 .../hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java   | 2 +-
 .../apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java   | 2 +-
 .../java/org/apache/hadoop/ozone/genesis/GenesisUtil.java| 2 +-
 25 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1272448/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
index 6a2286c..7c568f6 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
@@ -21,7 +21,7 @@ import com.google.common.base.Preconditions;
 import org.apache.ratis.shaded.io.netty.channel.Channel;
 import org.apache.ratis.shaded.io.netty.channel.ChannelHandlerContext;
 import org.apache.ratis.shaded.io.netty.channel.SimpleChannelInboundHandler;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1272448/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
index 7bf8f01..62b12e3 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
@@ -16,7 +16,7 @@
  */
 package org.apache.hadoop.hdds.client;
 
-import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1272448/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
--
diff --git 

[16/16] hadoop git commit: Merge branch 'upstream-trunk' into upstream-HDFS-12943

2018-06-11 Thread sunchao
Merge branch 'upstream-trunk' into upstream-HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9756c2cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9756c2cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9756c2cb

Branch: refs/heads/HDFS-12943
Commit: 9756c2cbe45b8be4cd16b962b01822d7b006f531
Parents: 990a221 18201b8
Author: Chao Sun 
Authored: Mon Jun 11 10:39:34 2018 -0700
Committer: Chao Sun 
Committed: Mon Jun 11 10:39:34 2018 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java |   6 +
 .../rawcoder/AbstractNativeRawDecoder.java  |  51 +-
 .../rawcoder/AbstractNativeRawEncoder.java  |  49 +-
 .../rawcoder/NativeRSRawDecoder.java|  19 +-
 .../rawcoder/NativeRSRawEncoder.java|  19 +-
 .../rawcoder/NativeXORRawDecoder.java   |  19 +-
 .../rawcoder/NativeXORRawEncoder.java   |  19 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java |  52 ++
 .../rawcoder/RawErasureCoderBenchmark.java  |   6 +
 .../util/TestCloseableReferenceCount.java   |  91 ++
 .../hadoop/util/TestIntrusiveCollection.java| 193 
 .../hadoop/util/TestLimitInputStream.java   |  74 ++
 .../java/org/apache/hadoop/util/TestShell.java  |   8 +
 .../org/apache/hadoop/util/TestStringUtils.java |  27 +
 .../hadoop/util/TestUTF8ByteArrayUtils.java |  57 ++
 .../hadoop/hdds/scm/XceiverClientHandler.java   |   2 +-
 .../org/apache/hadoop/hdds/client/BlockID.java  |   2 +-
 .../states/endpoint/RegisterEndpointTask.java   |   2 +-
 .../container/CloseContainerEventHandler.java   |  83 ++
 .../hdds/scm/container/ContainerMapping.java|   5 +
 .../scm/container/ContainerStateManager.java|   9 +
 .../hadoop/hdds/scm/container/Mapping.java  |   6 +
 .../TestCloseContainerEventHandler.java | 177 
 .../ha/ConfiguredFailoverProxyProvider.java |  10 +-
 ...apache.hadoop.security.token.TokenIdentifier |  17 +
 ...rg.apache.hadoop.security.token.TokenRenewer |  16 +
 .../server/blockmanagement/DatanodeManager.java |   2 +-
 .../server/namenode/FSDirErasureCodingOp.java   |  23 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  10 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  21 +-
 ...apache.hadoop.security.token.TokenIdentifier |  17 -
 ...rg.apache.hadoop.security.token.TokenRenewer |  16 -
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   5 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  16 +
 .../hdfs/TestErasureCodingExerciseAPIs.java |   2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |   2 +-
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 7909 -> 7909 bytes
 .../src/test/resources/editsStored.xml  |   2 +-
 .../ozone/client/rest/response/KeyInfo.java |   4 +-
 .../hadoop/ozone/web/response/KeyInfo.java  |   4 +-
 hadoop-ozone/docs/content/CommandShell.md   | 141 +--
 hadoop-ozone/docs/content/GettingStarted.md | 353 +++
 .../TestStorageContainerManagerHelper.java  |   2 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   2 +-
 .../ozone/ksm/TestContainerReportWithKeys.java  |   2 +-
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   |   2 +-
 .../ozone/ksm/TestKsmBlockVersioning.java   |   2 +-
 .../ksm/TestMultipleContainerReadWrite.java |   2 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java|   2 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |   2 +-
 .../ozone/scm/TestXceiverClientManager.java |   2 +-
 .../ozone/web/TestOzoneRestWithMiniCluster.java |   2 +-
 .../hadoop/ozone/web/client/TestKeys.java   |   8 +-
 .../hadoop/ozone/web/client/TestKeysRatis.java  |   2 +-
 .../hadoop/ozone/web/client/TestVolume.java |   2 +-
 .../hadoop/ozone/ksm/TestChunkStreams.java  |   2 +-
 .../org/apache/hadoop/ozone/freon/Freon.java|   6 +-
 .../genesis/BenchMarkDatanodeDispatcher.java|   2 +-
 .../genesis/BenchMarkMetadataStoreReads.java|   2 +-
 .../genesis/BenchMarkMetadataStoreWrites.java   |   2 +-
 .../ozone/genesis/BenchMarkRocksDbStore.java|   2 +-
 .../hadoop/ozone/genesis/GenesisUtil.java   |   2 +-
 hadoop-project/pom.xml  |   2 +-
 .../hadoop-yarn-server-nodemanager/pom.xml  |  21 +
 .../containermanager/container/Container.java   |   8 +
 .../container/ContainerImpl.java|   5 +
 .../linux/resources/DefaultOOMHandler.java  | 249 +++--
 .../impl/container-executor.c   |  54 +-
 .../impl/container-executor.h   |   4 +
 .../main/native/container-executor/impl/main.c  |  19 +-
 .../linux/resources/TestDefaultOOMHandler.java  | 922 ---
 .../nodemanager/webapp/MockContainer.java   |   5 +
 .../src/site/markdown/Federation.md |  14 +-
 .../src/site/markdown/NodeManagerCgroups.md |   2 +-
 74 files changed, 2373 insertions(+), 619 deletions(-)

[13/16] hadoop git commit: HADOOP-15520. Add tests for various org.apache.hadoop.util classes. Contributed by Arash Nabili

2018-06-11 Thread sunchao
HADOOP-15520. Add tests for various org.apache.hadoop.util classes.
Contributed by Arash Nabili


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef0118b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef0118b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef0118b9

Branch: refs/heads/HDFS-12943
Commit: ef0118b91e384b9a6d96c2ae64480d9acf5aa6fb
Parents: 000a678
Author: Steve Loughran 
Authored: Sat Jun 9 15:33:30 2018 +0100
Committer: Steve Loughran 
Committed: Sat Jun 9 15:33:38 2018 +0100

--
 .../util/TestCloseableReferenceCount.java   |  91 +
 .../hadoop/util/TestIntrusiveCollection.java| 193 +++
 .../hadoop/util/TestLimitInputStream.java   |  74 +++
 .../java/org/apache/hadoop/util/TestShell.java  |   8 +
 .../org/apache/hadoop/util/TestStringUtils.java |  27 +++
 .../hadoop/util/TestUTF8ByteArrayUtils.java |  57 ++
 6 files changed, 450 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef0118b9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
new file mode 100644
index 000..31e1899
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.nio.channels.ClosedChannelException;
+
+import org.junit.Test;
+
+import org.apache.hadoop.test.HadoopTestBase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestCloseableReferenceCount extends HadoopTestBase {
+  @Test
+  public void testReference() throws ClosedChannelException {
+CloseableReferenceCount clr = new CloseableReferenceCount();
+clr.reference();
+assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+  }
+
+  @Test
+  public void testUnreference() throws ClosedChannelException {
+CloseableReferenceCount clr = new CloseableReferenceCount();
+clr.reference();
+clr.reference();
+assertFalse("New reference count should not equal STATUS_CLOSED_MASK",
+clr.unreference());
+assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+  }
+
+  @Test
+  public void testUnreferenceCheckClosed() throws ClosedChannelException {
+CloseableReferenceCount clr = new CloseableReferenceCount();
+clr.reference();
+clr.reference();
+clr.unreferenceCheckClosed();
+assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+  }
+
+  @Test
+  public void testSetClosed() throws ClosedChannelException {
+CloseableReferenceCount clr = new CloseableReferenceCount();
+assertTrue("Reference count should be open", clr.isOpen());
+clr.setClosed();
+assertFalse("Reference count should be closed", clr.isOpen());
+  }
+
+  @Test(expected = ClosedChannelException.class)
+  public void testReferenceClosedReference() throws ClosedChannelException {
+CloseableReferenceCount clr = new CloseableReferenceCount();
+clr.setClosed();
+assertFalse("Reference count should be closed", clr.isOpen());
+clr.reference();
+  }
+
+  @Test(expected = ClosedChannelException.class)
+  public void testUnreferenceClosedReference() throws ClosedChannelException {
+CloseableReferenceCount clr = new CloseableReferenceCount();
+clr.reference();
+clr.setClosed();
+assertFalse("Reference count should be closed", clr.isOpen());
+clr.unreferenceCheckClosed();
+  }
+
+  @Test(expected = ClosedChannelException.class)
+  public void testDoubleClose() throws ClosedChannelException {
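
For orientation, a small sketch of the idiom these tests exercise, using only the public CloseableReferenceCount API; the resource class itself is hypothetical:

    import java.nio.channels.ClosedChannelException;
    import org.apache.hadoop.util.CloseableReferenceCount;

    /** Hypothetical resource guarded by a CloseableReferenceCount. */
    class GuardedResource implements AutoCloseable {
      private final CloseableReferenceCount refCount =
          new CloseableReferenceCount();

      void use() throws ClosedChannelException {
        refCount.reference();       // throws once setClosed() has run
        try {
          // ... touch the underlying resource ...
        } finally {
          refCount.unreference();   // balance the reference
        }
      }

      @Override
      public void close() throws ClosedChannelException {
        refCount.setClosed();       // a second close throws, per testDoubleClose
      }
    }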

[12/16] hadoop git commit: HDFS-12670. can't renew HDFS tokens with only the hdfs client jar. Contributed by Arpit Agarwal.

2018-06-11 Thread sunchao
HDFS-12670. can't renew HDFS tokens with only the hdfs client jar. Contributed 
by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/000a6783
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/000a6783
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/000a6783

Branch: refs/heads/HDFS-12943
Commit: 000a67839666bf7cb39d3955757bb05fa95f1b18
Parents: fba1c42
Author: Arpit Agarwal 
Authored: Fri Jun 8 17:57:12 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Jun 8 17:57:12 2018 -0700

--
 ...rg.apache.hadoop.security.token.TokenIdentifier | 17 +
 .../org.apache.hadoop.security.token.TokenRenewer  | 16 
 ...rg.apache.hadoop.security.token.TokenIdentifier | 17 -
 .../org.apache.hadoop.security.token.TokenRenewer  | 16 
 4 files changed, 33 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/000a6783/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 000..b6b6171
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,17 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$WebHdfsDelegationTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier$SWebHdfsDelegationTokenIdentifier

http://git-wip-us.apache.org/repos/asf/hadoop/blob/000a6783/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
new file mode 100644
index 000..7efd684
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
@@ -0,0 +1,16 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.hdfs.DFSClient$Renewer
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer
+org.apache.hadoop.hdfs.web.TokenAspect$TokenManager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/000a6783/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
deleted file mode 100644
index b6b6171..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
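
These service files feed the standard JDK ServiceLoader mechanism; moving them from hadoop-hdfs into hadoop-hdfs-client is what lets a client-only classpath resolve HDFS token renewers. A rough sketch of the lookup (paraphrased; Hadoop's actual Token internals differ in detail):

    import java.util.ServiceLoader;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.TokenRenewer;

    class RenewerLookupSketch {
      static TokenRenewer findRenewer(Text kind) {
        // ServiceLoader scans every jar on the classpath for
        // META-INF/services/org.apache.hadoop.security.token.TokenRenewer.
        for (TokenRenewer renewer : ServiceLoader.load(TokenRenewer.class)) {
          if (renewer.handleKind(kind)) {
            return renewer;
          }
        }
        return null; // no renewer registered for this token kind
      }
    }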

[10/16] hadoop git commit: HDFS-13642. Creating a file with block size smaller than EC policy's cell size should fail.

2018-06-11 Thread sunchao
HDFS-13642. Creating a file with block size smaller than EC policy's cell size 
should fail.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf410831
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf410831
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf410831

Branch: refs/heads/HDFS-12943
Commit: cf4108313da83e28d07676078a33016ec8856ff6
Parents: a127244
Author: Xiao Chen 
Authored: Fri Jun 8 15:13:38 2018 -0700
Committer: Xiao Chen 
Committed: Fri Jun 8 15:14:11 2018 -0700

--
 .../server/namenode/FSDirErasureCodingOp.java   |  23 +++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  10 ++--
 .../hdfs/server/namenode/FSNamesystem.java  |  21 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   5 ++--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  16 +
 .../hdfs/TestErasureCodingExerciseAPIs.java |   2 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |   2 +-
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 7909 -> 7909 bytes
 .../src/test/resources/editsStored.xml  |   2 +-
 9 files changed, 58 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf410831/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 3a32db4..7160b86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
@@ -344,16 +345,28 @@ final class FSDirErasureCodingOp {
   }
 
   /**
-   * Check if the file or directory has an erasure coding policy.
+   * Get the erasure coding policy information for specified path and policy
+   * name. If ec policy name is given, it will be parsed and the corresponding
+   * policy will be returned. Otherwise, get the policy from the parents of the
+   * iip.
    *
    * @param fsn namespace
+   * @param ecPolicyName the ec policy name
    * @param iip inodes in the path containing the file
-   * @return Whether the file or directory has an erasure coding policy.
+   * @return {@link ErasureCodingPolicy}, or null if no policy is found
    * @throws IOException
    */
-  static boolean hasErasureCodingPolicy(final FSNamesystem fsn,
-      final INodesInPath iip) throws IOException {
-    return unprotectedGetErasureCodingPolicy(fsn, iip) != null;
+  static ErasureCodingPolicy getErasureCodingPolicy(FSNamesystem fsn,
+      String ecPolicyName, INodesInPath iip) throws IOException {
+    ErasureCodingPolicy ecPolicy;
+    if (!StringUtils.isEmpty(ecPolicyName)) {
+      ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
+          fsn, ecPolicyName);
+    } else {
+      ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
+          fsn, iip);
+    }
+    return ecPolicy;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf410831/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 8f34e1c..03c349c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.AddBlockFlag;
@@ -543,13 +542,8 @@ class FSDirWriteFileOp {
   boolean isStriped = false;
   ErasureCodingPolicy ecPolicy = null;
   if (!shouldReplicate) {
-  
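
The core rule behind this change can be sketched in a few lines: fail a create
when the requested block size cannot hold even one cell of the file's erasure
coding policy. A minimal sketch; the helper name and message are illustrative,
not the committed FSNamesystem code:

```
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

final class BlockSizeCheck {
  // Illustrative helper: a striped file is laid out cell by cell, so a
  // block smaller than one cell can never be filled; reject it up front.
  static void checkBlockSize(long blockSize, ErasureCodingPolicy ecPolicy)
      throws IOException {
    if (ecPolicy != null && blockSize < ecPolicy.getCellSize()) {
      throw new IOException("Specified block size " + blockSize
          + " is less than the cell size " + ecPolicy.getCellSize()
          + " of the erasure coding policy " + ecPolicy.getName());
    }
  }
}
```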

[02/16] hadoop git commit: HDDS-147. Update Ozone site docs. Contributed by Arpit Agarwal.

2018-06-11 Thread sunchao
HDDS-147. Update Ozone site docs. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba303b1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba303b1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba303b1f

Branch: refs/heads/HDFS-12943
Commit: ba303b1f890ccd4deb806cb030e26a77e316ebe4
Parents: 12be8ba
Author: Arpit Agarwal 
Authored: Thu Jun 7 14:10:52 2018 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 7 14:11:20 2018 -0700

--
 hadoop-ozone/docs/content/CommandShell.md   | 141 -
 hadoop-ozone/docs/content/GettingStarted.md | 353 ---
 2 files changed, 254 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba303b1f/hadoop-ozone/docs/content/CommandShell.md
--
diff --git a/hadoop-ozone/docs/content/CommandShell.md 
b/hadoop-ozone/docs/content/CommandShell.md
index d8a733a..95820e9 100644
--- a/hadoop-ozone/docs/content/CommandShell.md
+++ b/hadoop-ozone/docs/content/CommandShell.md
@@ -15,139 +15,144 @@ menu: main
   See the License for the specific language governing permissions and
   limitations under the License. See accompanying LICENSE file.
 -->
-Ozone Command Shell
-===
+# Ozone Command Shell
 
-Ozone command shell gives a command shell interface to work against ozone.
+
+Ozone command shell gives a command shell interface to work against Ozone.
 Please note that this document assumes that the cluster is deployed
 with simple authentication.
 
 The Ozone commands take the following format.
+```
ozone oz --command_ /volume/bucket/key -user <username> [-root]
+```
 
-* `ozone oz --command_ http://hostname:port/volume/bucket/key -user
-<username> -root`
-
-The *port* specified in command should match the port mentioned in the config
+The `port` specified in the command should match the port mentioned in the config
 property `hdds.rest.http-address`. This property can be set in 
`ozone-site.xml`.
 The default value for the port is `9880` and is used in the commands below.
 
-The *-root* option is a command line short cut that allows *ozone oz*
+The `-root` option is a command-line shortcut that allows *ozone oz*
 commands to be run as the user that started the cluster. This is useful to
 indicate that you want the commands to be run as some admin user. The only
 reason for this option is that it makes the life of a lazy developer
 easier.
 
-Ozone Volume Commands
-
+## Volume Commands
+
 
 The volume commands allow users to create, delete and list the volumes in the
 ozone cluster.
 
 ### Create Volume
-
-Volumes can be created only by Admins. Here is an example of creating a volume.
-
-* `ozone oz -createVolume http://localhost:9880/hive -user bilbo -quota
-100TB -root`
-
+Volumes can be created only by administrators. Here is an example of creating 
a volume.
+```
+ozone oz -createVolume hive -user bilbo -quota 100TB -root
+```
 The above command creates a volume called `hive` owned by user `bilbo`. The
 `-root` option allows the command to be executed as user `hdfs` which is an
 admin in the cluster.
 
 ### Update Volume
-
 Updates information like ownership and quota on an existing volume.
-
-* `ozone oz  -updateVolume  http://localhost:9880/hive -quota 500TB -root`
+```
+ozone oz -updateVolume hive -quota 500TB -root
+```
 
 The above command changes the volume quota of hive from 100TB to 500TB.
 
 ### Delete Volume
 Deletes a Volume if it is empty.
-
-* `ozone oz -deleteVolume http://localhost:9880/hive -root`
-
+```
+ozone oz -deleteVolume /hive -root
+```
 
 ### Info Volume
-Info volume command allows the owner or the administrator of the cluster to 
read meta-data about a specific volume.
-
-* `ozone oz -infoVolume http://localhost:9880/hive -root`
+The info volume command allows the owner or the administrator of the cluster
+to read meta-data about a specific volume.
+```
+ozone oz -infoVolume /hive -root
+```
 
 ### List Volumes
-
-List volume command can be used by administrator to list volumes of any user. 
It can also be used by a user to list volumes owned by him.
-
-* `ozone oz -listVolume http://localhost:9880/ -user bilbo -root`
+The list volume command can be used by an administrator to list volumes of any
+user. It can also be used by any user to list their own volumes.
+```
+ozone oz -listVolume / -user bilbo
+```
 
 The above command lists all volumes owned by user bilbo.
 
-Ozone Bucket Commands
-
-
-Bucket commands follow a similar pattern as volume commands. However bucket 
commands are designed to be run by the owner of the volume.
-Following examples assume that these commands are run by the owner of the 
volume or bucket.
+## Bucket Commands
 
+Bucket commands follow 
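
A note on the configuration the revised doc references: the REST port the
shell talks to comes from the `hdds.rest.http-address` property. A minimal
`ozone-site.xml` entry might look like this sketch, where the bind address is
an illustrative assumption and `9880` is the documented default:

```
<property>
  <!-- Bind address shown here is illustrative; only the port default
       (9880) comes from the doc above. -->
  <name>hdds.rest.http-address</name>
  <value>0.0.0.0:9880</value>
</property>
```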

[08/16] hadoop git commit: HADOOP-15482. Upgrade jackson-databind to version 2.9.5. Contributed by Lokesh Jain.

2018-06-11 Thread sunchao
HADOOP-15482. Upgrade jackson-databind to version 2.9.5. Contributed by Lokesh 
Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c42dcc7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c42dcc7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c42dcc7c

Branch: refs/heads/HDFS-12943
Commit: c42dcc7c47340d517563890269c6c112996e8897
Parents: 3b88fe2
Author: Jitendra Pandey 
Authored: Thu Jun 7 23:00:26 2018 -0700
Committer: Jitendra Pandey 
Committed: Thu Jun 7 23:00:26 2018 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c42dcc7c/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8edfd76..8cb5bfc 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -69,7 +69,7 @@
 
 
 <jackson.version>1.9.13</jackson.version>
-<jackson2.version>2.9.4</jackson2.version>
+<jackson2.version>2.9.5</jackson2.version>
 
 
 <slf4j.version>1.7.25</slf4j.version>
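
For context, modules that consume jackson-databind pick this bump up through
the Maven property rather than pinning a version of their own. A hedged sketch
of such a declaration; illustrative, not quoted from the Hadoop POMs:

```
<dependency>
  <groupId>com.fasterxml.jackson.core</groupId>
  <artifactId>jackson-databind</artifactId>
  <!-- Resolves to 2.9.5 via the property bumped in the hunk above. -->
  <version>${jackson2.version}</version>
</dependency>
```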





[06/16] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-06-11 Thread sunchao
Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/351cf87c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/351cf87c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/351cf87c

Branch: refs/heads/HDFS-12943
Commit: 351cf87c92872d90f62c476f85ae4d02e485769c
Parents: d5eca1a
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Robert Kanter 
Committed: Thu Jun 7 17:09:34 2018 -0700

--
 .../impl/container-executor.c   | 54 ++--
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md |  2 +-
 4 files changed, 55 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/351cf87c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1b8842a..baf0e8b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -73,6 +73,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", 
"mapred", "hdfs", "bin", 0}
 
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
 
 static const char* PROC_PATH = "/proc";
 
@@ -482,6 +483,12 @@ int is_tc_support_enabled() {
 DEFAULT_TC_SUPPORT_ENABLED, &executor_cfg);
 }
 
+int is_mount_cgroups_support_enabled() {
+return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+  DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED,
+  &executor_cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -2346,20 +2353,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
 while ((ep = readdir(dp)) != NULL) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, 
ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -2383,11 +2395,16 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   char *mount_path = malloc(len);
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
-  struct stat sb;
 
   if (controller == NULL || mount_path == NULL) {
 fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n");
 result = OUT_OF_MEMORY;
+goto cleanup;
+  }
+  if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) {
+fprintf(LOGFILE, "Unsupported cgroup hierarchy path detected.\n");
+result = INVALID_COMMAND_PROVIDED;
+goto cleanup;
   }
   if (get_kv_key(pair, controller, len) < 0 ||
   get_kv_value(pair, mount_path, len) < 0) {
@@ -2395,13 +2412,10 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   pair);
 result = -1;
   } else {
-if (stat(mount_path, &sb) != 0) {
-  // Create mount point, if it does not exist
-  const mode_t mount_perms = S_IRWXU | S_IRGRP | S_IXGRP;
-  if (mkdirs(mount_path, mount_perms) == 0) {
-fprintf(LOGFILE, "Failed to create cgroup mount point %s at %s\n",
-  controller, mount_path);
-  }
+if (strstr(mount_path, "..") != NULL) {
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  result = 
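
With the default now off, clusters that rely on the NodeManager mounting
cgroups have to opt back in through container-executor.cfg. A minimal sketch,
assuming the new key follows the `feature.*.enabled` naming used by the docker
and tc flags in this same file:

```
# container-executor.cfg -- illustrative; the exact key name is an
# assumption based on the existing feature-flag pattern.
feature.mount-cgroup.enabled=1
```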

[14/16] hadoop git commit: HDFS-13667:Typo: Marking all datandoes as stale. Contributed by Nanda Kumar

2018-06-11 Thread sunchao
HDFS-13667:Typo: Marking all datandoes as stale. Contributed by Nanda Kumar


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccfb816d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccfb816d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccfb816d

Branch: refs/heads/HDFS-12943
Commit: ccfb816d39878abf4172933327d788c59b9eb082
Parents: ef0118b
Author: Bharat Viswanadham 
Authored: Sat Jun 9 16:39:09 2018 -0700
Committer: Bharat Viswanadham 
Committed: Sat Jun 9 16:43:03 2018 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccfb816d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index e6cd513..9ebc693 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1834,7 +1834,7 @@ public class DatanodeManager {
   }
   
   public void markAllDatanodesStale() {
-LOG.info("Marking all datandoes as stale");
+LOG.info("Marking all datanodes as stale");
 synchronized (this) {
   for (DatanodeDescriptor dn : datanodeMap.values()) {
 for(DatanodeStorageInfo storage : dn.getStorageInfos()) {





hadoop git commit: HDDS-133:Change format of .container files to Yaml. Contributed by Bharat Viswanadham

2018-06-11 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/HDDS-48 772c95395 -> 143dd560b


HDDS-133:Change format of .container files to Yaml. Contributed by Bharat 
Viswanadham


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/143dd560
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/143dd560
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/143dd560

Branch: refs/heads/HDDS-48
Commit: 143dd560bf506fafd849aeb47a42becc6c13330d
Parents: 772c953
Author: Bharat Viswanadham 
Authored: Mon Jun 11 09:04:54 2018 -0700
Committer: Bharat Viswanadham 
Committed: Mon Jun 11 09:04:54 2018 -0700

--
 hadoop-hdds/container-service/pom.xml   |   6 +
 .../common/impl/ChunkLayOutVersion.java |  18 ++
 .../container/common/impl/ContainerData.java|  28 +-
 .../common/impl/KeyValueContainerData.java  |  19 +-
 .../container/common/impl/KeyValueYaml.java | 274 +++
 .../common/TestKeyValueContainerData.java   |  15 +-
 .../container/common/impl/TestKeyValueYaml.java | 158 +++
 .../test/resources/additionalfields.container   |   9 +
 .../src/test/resources/incorrect.container  |  10 +
 hadoop-hdds/pom.xml |   2 +
 10 files changed, 521 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/143dd560/hadoop-hdds/container-service/pom.xml
--
diff --git a/hadoop-hdds/container-service/pom.xml 
b/hadoop-hdds/container-service/pom.xml
index 542462e..43f400c 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -53,6 +53,12 @@
     </dependency>
 
     <dependency>
+      <groupId>org.yaml</groupId>
+      <artifactId>snakeyaml</artifactId>
+      <version>1.8</version>
+    </dependency>
+
+    <dependency>
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143dd560/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
index fff68de6..d1b1bd6 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.ozone.container.common.impl;
 
 
+import com.google.common.base.Preconditions;
+
 /**
  * Defines layout versions for the Chunks.
  */
@@ -43,6 +45,22 @@ public final class ChunkLayOutVersion {
   }
 
   /**
+   * Return ChunkLayOutVersion object for the chunkVersion.
+   * @param chunkVersion
+   * @return ChunkLayOutVersion
+   */
+  public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) {
+Preconditions.checkArgument((chunkVersion <= ChunkLayOutVersion
+.getLatestVersion().getVersion()));
+for(ChunkLayOutVersion chunkLayOutVersion : CHUNK_LAYOUT_VERSION_INFOS) {
+  if(chunkLayOutVersion.getVersion() == chunkVersion) {
+return chunkLayOutVersion;
+  }
+}
+return null;
+  }
+
+  /**
    * Returns all versions.
    *
    * @return Version info array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/143dd560/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 06aae66..0bd7795 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -42,7 +42,7 @@ public class ContainerData {
   private final long containerId;
 
   // Layout version of the container data
-  private final ChunkLayOutVersion layOutVersion;
+  private final int layOutVersion;
 
   // Metadata of the container will be a key value pair.
   // This can hold information like volume name, owner etc.,
@@ -67,7 +67,27 @@ public class ContainerData {
   public ContainerData(ContainerType type, long containerId) {
 this.containerType = type;
 this.containerId = containerId;
-this.layOutVersion = ChunkLayOutVersion.getLatestVersion();
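
With snakeyaml on the classpath (added in the POM hunk above), reading a YAML
`.container` descriptor back takes only a few lines. A minimal sketch that
treats the file as a generic map; the `containerId` field name is an
assumption, not the schema KeyValueYaml actually defines:

```
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.Map;

import org.yaml.snakeyaml.Yaml;

public final class ContainerFileReader {
  public static void main(String[] args) throws Exception {
    try (InputStream in = new FileInputStream(args[0])) {
      // Parse the YAML document into a plain key/value map.
      @SuppressWarnings("unchecked")
      Map<String, Object> fields = (Map<String, Object>) new Yaml().load(in);
      // "containerId" is an illustrative field name.
      System.out.println("containerId = " + fields.get("containerId"));
    }
  }
}
```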

hadoop git commit: HADOOP-15499. Performance severe drops when running RawErasureCoderBenchmark with NativeRSRawErasureCoder. Contributed by Sammi Chen.

2018-06-11 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c0d46a84a -> e3c96354a


HADOOP-15499. Performance severe drops when running RawErasureCoderBenchmark 
with NativeRSRawErasureCoder. Contributed by Sammi Chen.

(cherry picked from commit 18201b882a38ad875358c5d23c09b0ef903c2f91)
(cherry picked from commit b8741102758f70e79eb4043b71433560f5ca713e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3c96354
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3c96354
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3c96354

Branch: refs/heads/branch-3.1
Commit: e3c96354a749f50038c7604fcc3fb23ecf262add
Parents: c0d46a8
Author: Sammi Chen 
Authored: Mon Jun 11 13:53:37 2018 +0800
Committer: Sammi Chen 
Committed: Mon Jun 11 14:03:39 2018 +0800

--
 .../rawcoder/AbstractNativeRawDecoder.java  | 51 
 .../rawcoder/AbstractNativeRawEncoder.java  | 49 +++
 .../rawcoder/NativeRSRawDecoder.java| 19 ++--
 .../rawcoder/NativeRSRawEncoder.java| 19 ++--
 .../rawcoder/NativeXORRawDecoder.java   | 19 ++--
 .../rawcoder/NativeXORRawEncoder.java   | 19 ++--
 .../rawcoder/RawErasureCoderBenchmark.java  |  6 +++
 7 files changed, 127 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3c96354/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
index e845747..cb71a80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * Abstract native raw decoder for all native coders to extend with.
@@ -34,36 +35,46 @@ abstract class AbstractNativeRawDecoder extends 
RawErasureDecoder {
   public static Logger LOG =
   LoggerFactory.getLogger(AbstractNativeRawDecoder.class);
 
+  // Protect ISA-L coder data structure in native layer from being accessed and
+  // updated concurrently by the init, release and decode functions.
+  protected final ReentrantReadWriteLock decoderLock =
+      new ReentrantReadWriteLock();
+
   public AbstractNativeRawDecoder(ErasureCoderOptions coderOptions) {
     super(coderOptions);
   }
 
   @Override
-  protected synchronized void doDecode(ByteBufferDecodingState decodingState)
+  protected void doDecode(ByteBufferDecodingState decodingState)
       throws IOException {
-    if (nativeCoder == 0) {
-      throw new IOException(String.format("%s closed",
-          getClass().getSimpleName()));
-    }
-    int[] inputOffsets = new int[decodingState.inputs.length];
-    int[] outputOffsets = new int[decodingState.outputs.length];
+    decoderLock.readLock().lock();
+    try {
+      if (nativeCoder == 0) {
+        throw new IOException(String.format("%s closed",
+            getClass().getSimpleName()));
+      }
+      int[] inputOffsets = new int[decodingState.inputs.length];
+      int[] outputOffsets = new int[decodingState.outputs.length];
 
-    ByteBuffer buffer;
-    for (int i = 0; i < decodingState.inputs.length; ++i) {
-      buffer = decodingState.inputs[i];
-      if (buffer != null) {
-        inputOffsets[i] = buffer.position();
+      ByteBuffer buffer;
+      for (int i = 0; i < decodingState.inputs.length; ++i) {
+        buffer = decodingState.inputs[i];
+        if (buffer != null) {
+          inputOffsets[i] = buffer.position();
+        }
       }
-    }
 
-    for (int i = 0; i < decodingState.outputs.length; ++i) {
-      buffer = decodingState.outputs[i];
-      outputOffsets[i] = buffer.position();
-    }
+      for (int i = 0; i < decodingState.outputs.length; ++i) {
+        buffer = decodingState.outputs[i];
+        outputOffsets[i] = buffer.position();
+      }
 
-    performDecodeImpl(decodingState.inputs, inputOffsets,
-        decodingState.decodeLength, decodingState.erasedIndexes,
-        decodingState.outputs, outputOffsets);
+      performDecodeImpl(decodingState.inputs, inputOffsets,
+          decodingState.decodeLength, decodingState.erasedIndexes,
+          decodingState.outputs, outputOffsets);
+    } finally {
+
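
The pattern in the hunk above is worth spelling out: the method-level
`synchronized` is replaced by a read/write lock, so concurrent decode calls
share the read lock while init and release take the write lock. A minimal
sketch of that pattern, with names invented for illustration:

```
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class NativeCoderHolder {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private long nativeCoder = 1; // non-zero stands in for an open native handle

  void decode() {
    lock.readLock().lock(); // many decode calls may proceed concurrently
    try {
      if (nativeCoder == 0) {
        throw new IllegalStateException("coder closed");
      }
      // ... call into the native decode implementation ...
    } finally {
      lock.readLock().unlock();
    }
  }

  void release() {
    lock.writeLock().lock(); // exclusive: no decode may be in flight
    try {
      nativeCoder = 0; // free the native resources exactly once
    } finally {
      lock.writeLock().unlock();
    }
  }
}
```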