hadoop git commit: YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer binds to default port 8188. Contributed by Varun Saxena.

2015-11-06 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 0b55634b9 -> 6b27de0f3


YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer binds to default port 8188. Contributed by Varun Saxena.

(cherry picked from commit ce31b22739512804da38cf87e0ce1059e3128da3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b27de0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b27de0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b27de0f

Branch: refs/heads/branch-2.6
Commit: 6b27de0f361aceeb0feaad2f0bacfc35865c31d7
Parents: 0b55634
Author: Tsuyoshi Ozawa 
Authored: Fri Oct 30 17:51:39 2015 +0900
Committer: Sangjin Lee 
Committed: Fri Nov 6 00:19:41 2015 -0800

--
 .../mapreduce/jobhistory/TestJobHistoryEventHandler.java  | 10 +++---
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../ApplicationHistoryServer.java |  6 ++
 3 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b27de0f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index de260c9..6e43c1b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -446,9 +446,6 @@ public class TestJobHistoryEventHandler {
 TestParams t = new TestParams(false);
 Configuration conf = new YarnConfiguration();
 conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
-JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
-jheh.init(conf);
 MiniYARNCluster yarnCluster = null;
 long currentTime = System.currentTimeMillis();
 try {
@@ -456,6 +453,13 @@ public class TestJobHistoryEventHandler {
 TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
   yarnCluster.init(conf);
   yarnCluster.start();
+  Configuration confJHEH = new YarnConfiguration(conf);
+  confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+  confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+  MiniYARNCluster.getHostname() + ":" +
+  yarnCluster.getApplicationHistoryServer().getPort());
+  JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
+  jheh.init(confJHEH);
   jheh.start();
   TimelineStore ts = yarnCluster.getApplicationHistoryServer()
   .getTimelineStore();
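
A note on the pattern, for tests that follow it: the cluster must be started before the timeline address can be known, because the AHS now binds an ephemeral port instead of the default 8188. A minimal standalone sketch, assuming the getPort() accessor this patch adds to ApplicationHistoryServer (the class name TimelineAddressExample is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

public class TimelineAddressExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);

    // Mirror the test: one RM, one NM, one local dir, one log dir.
    MiniYARNCluster cluster = new MiniYARNCluster("example", 1, 1, 1, 1);
    cluster.init(conf);
    cluster.start();

    // Read back the port the AHS actually bound to (ephemeral, not 8188)
    // and publish it to any configuration handed to timeline clients.
    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
        MiniYARNCluster.getHostname() + ":"
            + cluster.getApplicationHistoryServer().getPort());

    cluster.stop();
  }
}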

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b27de0f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5344e75..123c3e3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -18,6 +18,9 @@ Release 2.6.3 - UNRELEASED
 YARN-4312. TestSubmitApplicationWithRMHA fails on branch-2.7 and branch-2.6
 as some of the test cases time out. (Varun Saxena via ozawa)
 
+YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer
+binds to default port 8188. (Varun Saxena via ozawa)
+
 Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b27de0f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 55641ea..a5f3ae6 100644
--- 
a/hadoop-yarn-pro

hadoop git commit: HADOOP-12413. AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. Contributed by Zhihai Xu. (cherry picked from commit b2017d9b032af20044fdf60ddb

2015-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 616ed9084 -> 098c2df0c


HADOOP-12413. AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. Contributed by Zhihai Xu.
(cherry picked from commit b2017d9b032af20044fdf60ddbd1575a554ccb79)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/098c2df0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/098c2df0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/098c2df0

Branch: refs/heads/branch-2.7
Commit: 098c2df0c09b0b24121a8d4663168a5f58799aef
Parents: 616ed90
Author: cnauroth 
Authored: Tue Sep 15 10:41:50 2015 -0700
Committer: Jason Lowe 
Committed: Fri Nov 6 16:54:55 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../apache/hadoop/security/authorize/AccessControlList.java | 2 +-
 .../hadoop/security/authorize/TestAccessControlList.java| 9 +
 3 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/098c2df0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 561d8a2..a54bb39 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.7.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/098c2df0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
index f19776f..b1b474b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
@@ -230,7 +230,7 @@ public class AccessControlList implements Writable {
   public final boolean isUserInList(UserGroupInformation ugi) {
 if (allAllowed || users.contains(ugi.getShortUserName())) {
   return true;
-} else {
+} else if (!groups.isEmpty()) {
   for(String group: ugi.getGroupNames()) {
 if (groups.contains(group)) {
   return true;
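
The guard matters because ugi.getGroupNames() can trigger a group-mapping lookup (a shell command or LDAP call) on every authorization check; an ACL that names no groups can now answer without that cost. A small sketch of the resulting behavior, assuming a users-only ACL string:

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AclExample {
  public static void main(String[] args) {
    // "alice,bob" lists users only; the group part after the space is empty.
    AccessControlList acl = new AccessControlList("alice,bob");
    UserGroupInformation carol = UserGroupInformation.createRemoteUser("carol");
    // With the fix, this returns false without ever calling
    // carol.getGroupNames(), which is exactly what the new Mockito test verifies.
    System.out.println(acl.isUserAllowed(carol)); // false
  }
}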

http://git-wip-us.apache.org/repos/asf/hadoop/blob/098c2df0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
index 926e3b9..82942fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
@@ -37,6 +37,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Test;
 
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class TestAccessControlList {
@@ -449,6 +453,11 @@ public class TestAccessControlList {
 assertUserAllowed(susan, acl);
 assertUserAllowed(barbara, acl);
 assertUserAllowed(ian, acl);
+
+acl = new AccessControlList("");
+UserGroupInformation spyUser = spy(drwho);
+acl.isUserAllowed(spyUser);
+verify(spyUser, never()).getGroupNames();
   }
 
   private void assertUserAllowed(UserGroupInformation ugi,



hadoop git commit: YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188. (Meng Ding via wangda)

2015-11-06 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 098c2df0c -> f7deb6b47


YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188. (Meng Ding via wangda)

(cherry picked from commit 0783184f4b3f669f7211e42b395b62d63144100d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7deb6b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7deb6b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7deb6b4

Branch: refs/heads/branch-2.7
Commit: f7deb6b472f83fa81902bce849e0c9bb5ffa7b37
Parents: 098c2df
Author: Wangda Tan 
Authored: Tue Nov 3 11:18:34 2015 -0800
Committer: Sangjin Lee 
Committed: Fri Nov 6 09:07:27 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../applications/distributedshell/TestDistributedShell.java | 9 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7deb6b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 17974e1..fa1ec82 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer
+binds to default port 8188. (Meng Ding via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7deb6b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 967d172..47b9dfb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -89,9 +89,14 @@ public class TestDistributedShell {
   yarnCluster.init(conf);
   
   yarnCluster.start();
-  
+
+  conf.set(
+  YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+  MiniYARNCluster.getHostname() + ":"
+  + yarnCluster.getApplicationHistoryServer().getPort());
+
   waitForNMsToRegister();
-  
+
   URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
   if (url == null) {
     throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
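
Once the configuration carries the cluster's real address, clients built from it reach the right endpoint. A hedged sketch of the consumer side (assuming the standard TimelineClient factory; conf is the Configuration populated above):

import org.apache.hadoop.yarn.client.api.TimelineClient;

// conf already holds the cluster's real TIMELINE_SERVICE_WEBAPP_ADDRESS.
TimelineClient client = TimelineClient.createTimelineClient();
client.init(conf);
client.start();
// ... putEntities(...) calls now hit the MiniYARNCluster's AHS ...
client.stop();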



hadoop git commit: YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188. (Meng Ding via wangda)

2015-11-06 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 6b27de0f3 -> fb7be09f2


YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer binds to default port 8188. (Meng Ding via wangda)

(cherry picked from commit 0783184f4b3f669f7211e42b395b62d63144100d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb7be09f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb7be09f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb7be09f

Branch: refs/heads/branch-2.6
Commit: fb7be09f20a7d932d3055b38b1bd47c7a8f8543e
Parents: 6b27de0
Author: Wangda Tan 
Authored: Tue Nov 3 11:18:34 2015 -0800
Committer: Sangjin Lee 
Committed: Fri Nov 6 09:10:12 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../applications/distributedshell/TestDistributedShell.java | 9 +++--
 2 files changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb7be09f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 123c3e3..3032e19 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -21,6 +21,9 @@ Release 2.6.3 - UNRELEASED
YARN-4320. TestJobHistoryEventHandler fails as AHS in MiniYarnCluster no longer
binds to default port 8188. (Varun Saxena via ozawa)
 
+YARN-4326. Fix TestDistributedShell timeout as AHS in MiniYarnCluster no longer
+binds to default port 8188. (Meng Ding via wangda)
+
 Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb7be09f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 904ad58..ec795bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -88,9 +88,14 @@ public class TestDistributedShell {
   yarnCluster.init(conf);
   
   yarnCluster.start();
-  
+
+  conf.set(
+  YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+  MiniYARNCluster.getHostname() + ":"
+  + yarnCluster.getApplicationHistoryServer().getPort());
+
   waitForNMsToRegister();
-  
+
   URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
   if (url == null) {
     throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");



hadoop git commit: HADOOP-12413. AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. Contributed by Zhihai Xu. (cherry picked from commit b2017d9b032af20044fdf60ddb

2015-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 fb7be09f2 -> cab916cc7


HADOOP-12413. AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. Contributed by Zhihai Xu.
(cherry picked from commit b2017d9b032af20044fdf60ddbd1575a554ccb79)

Conflicts:

hadoop-common-project/hadoop-common/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cab916cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cab916cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cab916cc

Branch: refs/heads/branch-2.6
Commit: cab916cc7bbb5b232f1d10fb3b50464eb96dd76b
Parents: fb7be09
Author: Jason Lowe 
Authored: Fri Nov 6 17:11:33 2015 +
Committer: Jason Lowe 
Committed: Fri Nov 6 17:12:19 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../apache/hadoop/security/authorize/AccessControlList.java | 2 +-
 .../hadoop/security/authorize/TestAccessControlList.java| 9 +
 3 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab916cc/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 98d4bbd..4ed93f7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.6.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab916cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
index f19776f..b1b474b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
@@ -230,7 +230,7 @@ public class AccessControlList implements Writable {
   public final boolean isUserInList(UserGroupInformation ugi) {
 if (allAllowed || users.contains(ugi.getShortUserName())) {
   return true;
-} else {
+} else if (!groups.isEmpty()) {
   for(String group: ugi.getGroupNames()) {
 if (groups.contains(group)) {
   return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cab916cc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
index 926e3b9..82942fc 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
@@ -37,6 +37,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Test;
 
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class TestAccessControlList {
@@ -449,6 +453,11 @@ public class TestAccessControlList {
 assertUserAllowed(susan, acl);
 assertUserAllowed(barbara, acl);
 assertUserAllowed(ian, acl);
+
+acl = new AccessControlList("");
+UserGroupInformation spyUser = spy(drwho);
+acl.isUserAllowed(spyUser);
+verify(spyUser, never()).getGroupNames();
   }
 
   private void assertUserAllowed(UserGroupInformation ugi,



hadoop git commit: Update CHANGES.txt for commit of HADOOP 12413 to 2.7.3 and 2.6.3

2015-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 66c096731 -> 74a9a5188


Update CHANGES.txt for commit of HADOOP 12413 to 2.7.3 and 2.6.3


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74a9a518
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74a9a518
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74a9a518

Branch: refs/heads/trunk
Commit: 74a9a51886c4379eefa539912932bfb99d5fd883
Parents: 66c0967
Author: Jason Lowe 
Authored: Fri Nov 6 17:16:14 2015 +
Committer: Jason Lowe 
Committed: Fri Nov 6 17:16:14 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74a9a518/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 25f0a8f..b20a1f6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -895,9 +895,6 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12324. Better exception reporting in SaslPlainServer.
 (Mike Yoder via stevel)
 
-HADOOP-12413. AccessControlList should avoid calling getGroupNames in
-isUserInList with empty groups. (Zhihai Xu via cnauroth)
-
 HADOOP-12404. Disable caching for JarURLConnection to avoid sharing
 JarFile with other users when loading resource from URL in Configuration
 class. (zxu)
@@ -1422,6 +1419,9 @@ Release 2.7.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -2210,6 +2210,9 @@ Release 2.6.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES



hadoop git commit: Update CHANGES.txt for commit of HADOOP 12413 to 2.7.3 and 2.6.3 (cherry picked from commit 74a9a51886c4379eefa539912932bfb99d5fd883)

2015-11-06 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 75bcc8bcd -> 836bfdd64


Update CHANGES.txt for commit of HADOOP 12413 to 2.7.3 and 2.6.3
(cherry picked from commit 74a9a51886c4379eefa539912932bfb99d5fd883)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/836bfdd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/836bfdd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/836bfdd6

Branch: refs/heads/branch-2
Commit: 836bfdd646c9b238be7b4db431b6e56a10fe64c5
Parents: 75bcc8b
Author: Jason Lowe 
Authored: Fri Nov 6 17:16:14 2015 +
Committer: Jason Lowe 
Committed: Fri Nov 6 17:18:23 2015 +

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/836bfdd6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 00af4bb..d30f0d5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -276,9 +276,6 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12324. Better exception reporting in SaslPlainServer.
 (Mike Yoder via stevel)
 
-HADOOP-12413. AccessControlList should avoid calling getGroupNames in
-isUserInList with empty groups. (Zhihai Xu via cnauroth)
-
 HADOOP-12404. Disable caching for JarURLConnection to avoid sharing
 JarFile with other users when loading resource from URL in Configuration
 class. (zxu)
@@ -806,6 +803,9 @@ Release 2.7.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -1621,6 +1621,9 @@ Release 2.6.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES



hadoop git commit: Revert "HADOOP-11684. S3a to use thread pool that blocks clients. (Thomas Demoor and Aaron Fabbri via lei)"

2015-11-06 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 836bfdd64 -> 47941858a


Revert "HADOOP-11684. S3a to use thread pool that blocks clients. (Thomas 
Demoor and Aaron Fabbri via lei)"

This reverts commit 01ae30796d0a120e4317e819533fb52bdae76885.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47941858
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47941858
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47941858

Branch: refs/heads/branch-2
Commit: 47941858af3411c49170a69c6a9ba406fb816516
Parents: 836bfdd
Author: Lei Xu 
Authored: Fri Nov 6 09:52:21 2015 -0800
Committer: Lei Xu 
Committed: Fri Nov 6 09:52:21 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 -
 .../src/main/resources/core-default.xml |  10 +-
 .../s3a/BlockingThreadPoolExecutorService.java  | 274 ---
 .../org/apache/hadoop/fs/s3a/Constants.java |  13 +-
 .../hadoop/fs/s3a/S3AFastOutputStream.java  |   4 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  82 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  10 +-
 .../TestBlockingThreadPoolExecutorService.java  | 182 
 .../fs/s3a/TestS3ABlockingThreadPool.java   |  80 --
 9 files changed, 97 insertions(+), 561 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47941858/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d30f0d5..e75693e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -326,9 +326,6 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11685. StorageException complaining " no lease ID" during HBase
 distributed log splitting (Duo Xu via cnauroth)
 
-HADOOP-11684. S3a to use thread pool that blocks clients. (Thomas Demoor
-and Aaron Fabbri via lei)
-
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47941858/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 5607489..fc09ddf 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -844,12 +844,18 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>fs.s3a.threads.max</name>
-  <value>10</value>
+  <value>256</value>
   <description>Maximum number of concurrent active (part)uploads,
     which each use a thread from the threadpool.</description>
 </property>
 
+<property>
+  <name>fs.s3a.threads.core</name>
+  <value>15</value>
+  <description>Number of core threads in the threadpool.</description>
+</property>
+
 <property>
   <name>fs.s3a.threads.keepalivetime</name>
   <value>60</value>
   <description>Number of seconds a thread can be idle before being
@@ -858,7 +864,7 @@ for ldap providers in the same way as above does.
 
 <property>
   <name>fs.s3a.max.total.tasks</name>
-  <value>5</value>
+  <value>1000</value>
   <description>Number of (part)uploads allowed to the queue before
     blocking additional uploads.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47941858/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
deleted file mode 100644
index 3baf6fc..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BlockingThreadPoolExecutorService.java
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.util.Collection;
-import java.util.
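
For context on what is being reverted: the deleted class implemented an ExecutorService whose submit() blocks callers once the pool and its queue are saturated, the "blocking additional uploads" behavior described in the XML above. A rough, illustrative sketch of that general pattern, not the removed class's actual API:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;

// Illustrative only: a semaphore-bounded wrapper that blocks submitters.
class BlockingSubmitter {
  private final ExecutorService pool;
  private final Semaphore permits;

  BlockingSubmitter(int threads, int queueCapacity) {
    this.pool = Executors.newFixedThreadPool(threads);
    this.permits = new Semaphore(threads + queueCapacity);
  }

  Future<?> submit(Runnable task) throws InterruptedException {
    permits.acquire(); // the caller blocks here when saturated
    try {
      return pool.submit(() -> {
        try {
          task.run();
        } finally {
          permits.release();
        }
      });
    } catch (RejectedExecutionException e) {
      permits.release();
      throw e;
    }
  }
}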

[1/3] hadoop git commit: HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the length of storageIDs. (Contributed by szetszwo)

2015-11-06 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 47941858a -> 33908c6f5
  refs/heads/branch-2.7 f7deb6b47 -> 63ed399e1
  refs/heads/trunk 74a9a5188 -> 0b18e5e8c


HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the length of storageIDs. (Contributed by szetszwo)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b18e5e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b18e5e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b18e5e8

Branch: refs/heads/trunk
Commit: 0b18e5e8c69b40c9a446fff448d38e0dd10cb45e
Parents: 74a9a51
Author: Arpit Agarwal 
Authored: Fri Nov 6 10:13:22 2015 -0800
Committer: Arpit Agarwal 
Committed: Fri Nov 6 10:15:23 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeManager.java | 14 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 36 +++-
 .../TestCommitBlockSynchronization.java |  4 +--
 4 files changed, 36 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b18e5e8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index af6723f..63a99c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2346,6 +2346,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the
+length of storageIDs. (szetszwo via Arpit Agarwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b18e5e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index b32092d..3406cf4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -506,8 +506,18 @@ public class DatanodeManager {
   }
 
   public DatanodeStorageInfo[] getDatanodeStorageInfos(
-  DatanodeID[] datanodeID, String[] storageIDs)
-  throws UnregisteredNodeException {
+  DatanodeID[] datanodeID, String[] storageIDs,
+  String format, Object... args) throws UnregisteredNodeException {
+if (datanodeID.length != storageIDs.length) {
+  final String err = (storageIDs.length == 0?
+  "Missing storageIDs: It is likely that the HDFS client,"
+  + " who made this call, is running in an older version of Hadoop"
+  + " which does not support storageIDs."
+  : "Length mismatched: storageIDs.length=" + storageIDs.length + " != 
"
+  ) + " datanodeID.length=" + datanodeID.length;
+  throw new HadoopIllegalArgumentException(
+  err + ", "+ String.format(format, args));
+}
 if (datanodeID.length == 0) {
   return null;
 }
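
Note the shape of the check: the diagnostic is assembled from a caller-supplied format string, so String.format only runs on the failure path while call sites (see the FSNamesystem hunk below) still attach rich context. A standalone sketch of the idiom with hypothetical names, not the Hadoop API itself:

// Hypothetical helper mirroring the check's shape; not the Hadoop API.
class LengthCheck {
  static void checkMatchingLengths(Object[] nodes, String[] storageIDs,
      String format, Object... args) {
    if (nodes.length != storageIDs.length) {
      final String err = (storageIDs.length == 0
          ? "Missing storageIDs: likely an older client that predates storageIDs."
          : "Length mismatched: storageIDs.length=" + storageIDs.length + " !=")
          + " datanodeID.length=" + nodes.length;
      // String.format runs only on the failure path.
      throw new IllegalArgumentException(err + ", " + String.format(format, args));
    }
  }
}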

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b18e5e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 734e3ba..316b7de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2517,7 +2517,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   //find datanode storages
   final DatanodeManager dm = blockManager.getDatanodeManager();
-  chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs));
+  chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs,
+  "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
+  src, fileId, blk, clientName, clientMachine));
 } finally {
   readUnlock();
 }
@@ -3319,7 +3321,7 @@ public class FSNamesystem implements Namesystem, 
FSN

[3/3] hadoop git commit: HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the length of storageIDs. (Contributed by szetszwo)

2015-11-06 Thread arp
HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the length of storageIDs. (Contributed by szetszwo)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63ed399e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63ed399e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63ed399e

Branch: refs/heads/branch-2.7
Commit: 63ed399e14087acb442eaffbee6e32f083c9
Parents: f7deb6b
Author: Arpit Agarwal 
Authored: Fri Nov 6 10:13:22 2015 -0800
Committer: Arpit Agarwal 
Committed: Fri Nov 6 10:32:34 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeManager.java | 14 +++--
 .../hdfs/server/namenode/FSNamesystem.java  | 33 +++-
 .../TestCommitBlockSynchronization.java |  4 +--
 4 files changed, 35 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63ed399e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3e62c5d..65f7b61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -110,6 +110,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the
+length of storageIDs. (szetszwo via Arpit Agarwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63ed399e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 0cf1eee..c774d0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -502,8 +502,18 @@ public class DatanodeManager {
   }
 
   public DatanodeStorageInfo[] getDatanodeStorageInfos(
-  DatanodeID[] datanodeID, String[] storageIDs)
-  throws UnregisteredNodeException {
+  DatanodeID[] datanodeID, String[] storageIDs,
+  String format, Object... args) throws UnregisteredNodeException {
+if (datanodeID.length != storageIDs.length) {
+  final String err = (storageIDs.length == 0?
+  "Missing storageIDs: It is likely that the HDFS client,"
+  + " who made this call, is running in an older version of Hadoop"
+  + " which does not support storageIDs."
+  : "Length mismatched: storageIDs.length=" + storageIDs.length + " != 
"
+  ) + " datanodeID.length=" + datanodeID.length;
+  throw new HadoopIllegalArgumentException(
+  err + ", "+ String.format(format, args));
+}
 if (datanodeID.length == 0) {
   return null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63ed399e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a2e35eb..8cdae05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3350,7 +3352,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   //find datanode storages
   final DatanodeManager dm = blockManager.getDatanodeManager();
-  chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs));
+  chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs,
+  "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
+  src, fileId, blk, clientName, clientMachine));
 } finally {
   readUnlock();
 }
@@ -4258,7 +4260,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
  + ", deleteBlock=" + deleteblock
  + ")");
 checkOperation(OperationCategory.WRITE);
-String src = "";
+final Strin

[2/3] hadoop git commit: HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the length of storageIDs. (Contributed by szetszwo)

2015-11-06 Thread arp
HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the length of storageIDs. (Contributed by szetszwo)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33908c6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33908c6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33908c6f

Branch: refs/heads/branch-2
Commit: 33908c6f52b0f9010c6fde64874f3fbb333a9890
Parents: 4794185
Author: Arpit Agarwal 
Authored: Fri Nov 6 10:13:22 2015 -0800
Committer: Arpit Agarwal 
Committed: Fri Nov 6 10:19:00 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeManager.java | 14 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 36 +++-
 .../TestCommitBlockSynchronization.java |  4 +--
 4 files changed, 36 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33908c6f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8a53dce..0860c36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1503,6 +1503,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the
+length of storageIDs. (szetszwo via Arpit Agarwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33908c6f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 891ea89..870e5ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -505,8 +505,18 @@ public class DatanodeManager {
   }
 
   public DatanodeStorageInfo[] getDatanodeStorageInfos(
-  DatanodeID[] datanodeID, String[] storageIDs)
-  throws UnregisteredNodeException {
+  DatanodeID[] datanodeID, String[] storageIDs,
+  String format, Object... args) throws UnregisteredNodeException {
+if (datanodeID.length != storageIDs.length) {
+  final String err = (storageIDs.length == 0?
+  "Missing storageIDs: It is likely that the HDFS client,"
+  + " who made this call, is running in an older version of Hadoop"
+  + " which does not support storageIDs."
+  : "Length mismatched: storageIDs.length=" + storageIDs.length + " != 
"
+  ) + " datanodeID.length=" + datanodeID.length;
+  throw new HadoopIllegalArgumentException(
+  err + ", "+ String.format(format, args));
+}
 if (datanodeID.length == 0) {
   return null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33908c6f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 741a41b..d438ece 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2489,7 +2489,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   //find datanode storages
   final DatanodeManager dm = blockManager.getDatanodeManager();
-  chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs));
+  chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs,
+  "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
+  src, fileId, blk, clientName, clientMachine));
 } finally {
   readUnlock();
 }
@@ -3264,7 +3266,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
  + ", deleteBlock=" + deleteblock
  + ")");
 checkOperation(OperationCategory.WRITE);
-String src = "";
+final String

hadoop git commit: HDFS-9236. Missing sanity check for block size during block recovery. (Tony Wu via Yongjun Zhang)

2015-11-06 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0b18e5e8c -> b64242c0d


HDFS-9236. Missing sanity check for block size during block recovery. (Tony Wu via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b64242c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b64242c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b64242c0

Branch: refs/heads/trunk
Commit: b64242c0d2cabd225a8fb7d25fed449d252e4fa1
Parents: 0b18e5e
Author: Yongjun Zhang 
Authored: Fri Nov 6 11:15:54 2015 -0800
Committer: Yongjun Zhang 
Committed: Fri Nov 6 11:15:54 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/datanode/BlockRecoveryWorker.java| 66 +++-
 .../server/protocol/ReplicaRecoveryInfo.java|  6 ++
 .../hdfs/server/datanode/TestBlockRecovery.java | 37 +++
 4 files changed, 110 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b64242c0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 63a99c4..a512da5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1629,6 +1629,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9377. Fix findbugs warnings in FSDirSnapshotOp.
 (Mingliang Liu via Yongjun Zhang)
 
+HDFS-9236. Missing sanity check for block size during block recovery.
+(Tony Wu via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b64242c0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 42fcf48..9bd8703 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -103,8 +103,13 @@ public class BlockRecoveryWorker {
 protected void recover() throws IOException {
   List<BlockRecord> syncList = new ArrayList<>(locs.length);
   int errorCount = 0;
+  int candidateReplicaCnt = 0;
 
-  //check generation stamps
+  // Check generation stamps, replica size and state. Replica must satisfy
+  // the following criteria to be included in syncList for recovery:
+  // - Valid generation stamp
+  // - Non-zero length
+  // - Original state is RWR or better
   for(DatanodeID id : locs) {
 try {
   DatanodeID bpReg =datanode.getBPOfferService(bpid).bpRegistration;
@@ -115,7 +120,28 @@ public class BlockRecoveryWorker {
   if (info != null &&
   info.getGenerationStamp() >= block.getGenerationStamp() &&
   info.getNumBytes() > 0) {
-syncList.add(new BlockRecord(id, proxyDN, info));
+// Count the number of candidate replicas received.
+++candidateReplicaCnt;
+if (info.getOriginalReplicaState().getValue() <=
+ReplicaState.RWR.getValue()) {
+  syncList.add(new BlockRecord(id, proxyDN, info));
+} else {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Block recovery: Ignored replica with invalid " +
+"original state: " + info + " from DataNode: " + id);
+  }
+}
+  } else {
+if (LOG.isDebugEnabled()) {
+  if (info == null) {
+LOG.debug("Block recovery: DataNode: " + id + " does not have "
++ "replica for block: " + block);
+  } else {
+LOG.debug("Block recovery: Ignored replica with invalid "
++ "generation stamp or length: " + info + " from " +
+"DataNode: " + id);
+  }
+}
   }
 } catch (RecoveryInProgressException ripE) {
   InterDatanodeProtocol.LOG.warn(
@@ -136,6 +162,15 @@ public class BlockRecoveryWorker {
 + ", datanodeids=" + Arrays.asList(locs));
   }
 
+  // None of the replicas reported by DataNodes has the required original
+  // state, report the error.
+  if (candidateReplicaCnt > 0 && syncList.isEmpty()) {
+throw new IOException("Found " + candidateReplicaCnt

hadoop git commit: HDFS-9236. Missing sanity check for block size during block recovery. (Tony Wu via Yongjun Zhang)

2015-11-06 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 33908c6f5 -> 41d0d9a32


HDFS-9236. Missing sanity check for block size during block recovery. (Tony Wu via Yongjun Zhang)

(cherry picked from commit b64242c0d2cabd225a8fb7d25fed449d252e4fa1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41d0d9a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41d0d9a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41d0d9a3

Branch: refs/heads/branch-2
Commit: 41d0d9a32c880785a679e3dc5f066d5737aaef59
Parents: 33908c6
Author: Yongjun Zhang 
Authored: Fri Nov 6 11:15:54 2015 -0800
Committer: Yongjun Zhang 
Committed: Fri Nov 6 11:26:41 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/datanode/BlockRecoveryWorker.java| 66 +++-
 .../server/protocol/ReplicaRecoveryInfo.java|  6 ++
 .../hdfs/server/datanode/TestBlockRecovery.java | 37 +++
 4 files changed, 110 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41d0d9a3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0860c36..1c8e840 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -782,6 +782,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9377. Fix findbugs warnings in FSDirSnapshotOp.
 (Mingliang Liu via Yongjun Zhang)
 
+HDFS-9236. Missing sanity check for block size during block recovery.
+(Tony Wu via Yongjun Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41d0d9a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 672c4d7..ae95579 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -103,8 +103,13 @@ public class BlockRecoveryWorker {
 protected void recover() throws IOException {
   List<BlockRecord> syncList = new ArrayList<>(locs.length);
   int errorCount = 0;
+  int candidateReplicaCnt = 0;
 
-  //check generation stamps
+  // Check generation stamps, replica size and state. Replica must satisfy
+  // the following criteria to be included in syncList for recovery:
+  // - Valid generation stamp
+  // - Non-zero length
+  // - Original state is RWR or better
   for(DatanodeID id : locs) {
 try {
   DatanodeID bpReg =datanode.getBPOfferService(bpid).bpRegistration;
@@ -115,7 +120,28 @@ public class BlockRecoveryWorker {
   if (info != null &&
   info.getGenerationStamp() >= block.getGenerationStamp() &&
   info.getNumBytes() > 0) {
-syncList.add(new BlockRecord(id, proxyDN, info));
+// Count the number of candidate replicas received.
+++candidateReplicaCnt;
+if (info.getOriginalReplicaState().getValue() <=
+ReplicaState.RWR.getValue()) {
+  syncList.add(new BlockRecord(id, proxyDN, info));
+} else {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Block recovery: Ignored replica with invalid " +
+"original state: " + info + " from DataNode: " + id);
+  }
+}
+  } else {
+if (LOG.isDebugEnabled()) {
+  if (info == null) {
+LOG.debug("Block recovery: DataNode: " + id + " does not have "
++ "replica for block: " + block);
+  } else {
+LOG.debug("Block recovery: Ignored replica with invalid "
++ "generation stamp or length: " + info + " from " +
+"DataNode: " + id);
+  }
+}
   }
 } catch (RecoveryInProgressException ripE) {
   InterDatanodeProtocol.LOG.warn(
@@ -136,6 +162,15 @@ public class BlockRecoveryWorker {
 + ", datanodeids=" + Arrays.asList(locs));
   }
 
+  // None of the replicas reported by DataNodes has the required original
+  // state, report the error.
+  if (candidateReplicaCnt > 0 && syncList.

hadoop git commit: HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla.

2015-11-06 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk b64242c0d -> bf6aa30a1


HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf6aa30a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf6aa30a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf6aa30a

Branch: refs/heads/trunk
Commit: bf6aa30a156b3c5cac5469014a5989e0dfdc7256
Parents: b64242c
Author: Kihwal Lee 
Authored: Fri Nov 6 13:30:33 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Nov 6 13:30:33 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../BlockPlacementPolicyDefault.java|  7 ++-
 .../src/main/resources/hdfs-default.xml |  9 
 .../TestReplicationPolicyConsiderLoad.java  | 56 
 5 files changed, 77 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf6aa30a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a512da5..f12a2a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1683,6 +1683,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-9282. Make data directory count and storage raw capacity related tests
 FsDataset-agnostic. (Tony Wu via lei)
 
+HDFS-9318. considerLoad factor can be improved. (Kuhu Shukla via kihwal)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf6aa30a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c14ce20..54e0d10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -184,6 +184,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY;
   public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;
+  public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR =
+      "dfs.namenode.replication.considerLoad.factor";
+  public static final double
+      DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
   public static final String  DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
   public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
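
The new key makes the previously hardcoded load cutoff tunable: with considerLoad enabled, a DataNode is skipped as a placement target when its load exceeds the factor times the cluster average, and the 2.0 default preserves the old behavior. A hedged usage sketch (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ConsiderLoadExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
    // Loosen the cutoff from the 2.0 default to 3x the average cluster load.
    conf.setDouble(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR, 3.0);
  }
}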

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf6aa30a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d94179b..13b17e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -58,6 +58,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   };
 
   protected boolean considerLoad; 
+  protected double considerLoadFactor;
   private boolean preferLocalNode = true;
   protected NetworkTopology clusterMap;
   protected Host2NodesMap host2datanodeMap;
@@ -79,6 +80,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+this.considerLoadFactor = conf.getDouble(
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
 this.stats = stats;
 this.clusterMap = clusterMap;
 this.host2datanodeMap = host2da

hadoop git commit: HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla. (cherry picked from commit bf6aa30a156b3c5cac5469014a5989e0dfdc7256)

2015-11-06 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 41d0d9a32 -> 481e7248d


HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla.
(cherry picked from commit bf6aa30a156b3c5cac5469014a5989e0dfdc7256)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481e7248
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481e7248
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481e7248

Branch: refs/heads/branch-2
Commit: 481e7248dee5b0a3e0f3148c3cdde133a637b990
Parents: 41d0d9a
Author: Kihwal Lee 
Authored: Fri Nov 6 14:08:10 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Nov 6 14:08:10 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../BlockPlacementPolicyDefault.java|  7 ++-
 .../src/main/resources/hdfs-default.xml |  9 
 .../TestReplicationPolicyConsiderLoad.java  | 56 
 5 files changed, 77 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1c8e840..4539cd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -836,6 +836,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9282. Make data directory count and storage raw capacity related tests
     FsDataset-agnostic. (Tony Wu via lei)
 
+    HDFS-9318. considerLoad factor can be improved. (Kuhu Shukla via kihwal)
+
   BUG FIXES
 
     HDFS-8091: ACLStatus and XAttributes should be presented to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1e6143c..e73aa2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -179,6 +179,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY;
   public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;
+  public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR =
+      "dfs.namenode.replication.considerLoad.factor";
+  public static final double
+      DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
   public static final String  DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
   public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d94179b..13b17e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -58,6 +58,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   };
 
   protected boolean considerLoad;
+  protected double considerLoadFactor;
   private boolean preferLocalNode = true;
   protected NetworkTopology clusterMap;
   protected Host2NodesMap host2datanodeMap;
@@ -79,6 +80,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                          Host2NodesMap host2datanodeMap) {
     this.considerLoad = conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+    this.considerLoadFactor = conf.getDouble(
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
+        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
     this.stats = stats;
     this.clusterMap = clusterMap;
    this.host2datanodeMap = host2datanodeMap;
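
[Editor's note] The hunks reproduced above cover only the configuration plumbing, and the message is cut off before the hunk that actually applies the factor. Going by the default of 2.0, which matches the multiplier previously hardcoded into the load check, a minimal self-contained sketch of how the knob is presumably used follows; the class and method names are illustrative, not taken from the patch.

  // Sketch only: assumes HDFS-9318 turns the old hardcoded 2.0 load
  // multiplier into the configurable considerLoadFactor.
  public class ConsiderLoadSketch {
    // mirrors dfs.namenode.replication.considerLoad.factor (default 2.0)
    static double considerLoadFactor = 2.0;

    // A datanode is skipped as a replica target when its active transceiver
    // count exceeds considerLoadFactor times the cluster-wide average.
    static boolean isOverloaded(int nodeXceivers, double avgClusterXceivers) {
      return nodeXceivers > considerLoadFactor * avgClusterXceivers;
    }

    public static void main(String[] args) {
      System.out.println(isOverloaded(25, 10.0)); // true: 25 > 2.0 * 10
      System.out.println(isOverloaded(15, 10.0)); // false: 15 <= 2.0 * 10
    }
  }

Under that reading, raising the factor tolerates busier nodes as replica targets, while lowering it spreads writes more aggressively at the cost of rejecting more candidates.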

[1/2] hadoop git commit: HDFS-9379. Make NNThroughputBenchmark support more than 10 datanodes. (Contributed by Mingliang Liu)

2015-11-06 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 481e7248d -> 1e0746e75
  refs/heads/trunk bf6aa30a1 -> 2801b42a7


HDFS-9379. Make NNThroughputBenchmark support more than 10 datanodes. 
(Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2801b42a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2801b42a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2801b42a

Branch: refs/heads/trunk
Commit: 2801b42a7e178ad6a0e6b0f29f22f3571969c530
Parents: bf6aa30
Author: Arpit Agarwal 
Authored: Fri Nov 6 18:58:49 2015 -0800
Committer: Arpit Agarwal 
Committed: Fri Nov 6 18:58:49 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../hadoop/hdfs/server/namenode/NNThroughputBenchmark.java | 6 +-
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2801b42a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f12a2a4..dbffc3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1632,6 +1632,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9236. Missing sanity check for block size during block recovery.
     (Tony Wu via Yongjun Zhang)
 
+    HDFS-9379. Make NNThroughputBenchmark$BlockReportStats support more than 10
+    datanodes. (Mingliang Liu via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2801b42a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index b963d8f..6d27315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -1145,14 +1145,10 @@ public class NNThroughputBenchmark implements Tool {
       int nrFiles = (int)Math.ceil((double)nrBlocks / blocksPerFile);
       datanodes = new TinyDatanode[nrDatanodes];
       // create data-nodes
-      String prevDNName = "";
       for(int idx=0; idx < nrDatanodes; idx++) {
         datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
         datanodes[idx].register();
-        assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0
-            : "Data-nodes must be sorted lexicographically.";
         datanodes[idx].sendHeartbeat();
-        prevDNName = datanodes[idx].getXferAddr();
       }
 
       // create files
@@ -1184,7 +1180,7 @@ public class NNThroughputBenchmark implements Tool {
             prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
-          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
+          int dnIdx = dnInfo.getXferPort() - 1;
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
               loc.getBlock().getLocalBlock(),


[2/2] hadoop git commit: HDFS-9379. Make NNThroughputBenchmark support more than 10 datanodes. (Contributed by Mingliang Liu)

2015-11-06 Thread arp
HDFS-9379. Make NNThroughputBenchmark support more than 10 datanodes. 
(Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e0746e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e0746e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e0746e7

Branch: refs/heads/branch-2
Commit: 1e0746e7565a2a0fb8b160ac42a4cde9ba94d5b7
Parents: 481e724
Author: Arpit Agarwal 
Authored: Fri Nov 6 18:58:49 2015 -0800
Committer: Arpit Agarwal 
Committed: Fri Nov 6 18:59:03 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../hadoop/hdfs/server/namenode/NNThroughputBenchmark.java | 6 +-
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0746e7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4539cd3..17dc78a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -785,6 +785,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9236. Missing sanity check for block size during block recovery.
     (Tony Wu via Yongjun Zhang)
 
+    HDFS-9379. Make NNThroughputBenchmark$BlockReportStats support more than 10
+    datanodes. (Mingliang Liu via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0746e7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index b963d8f..6d27315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -1145,14 +1145,10 @@ public class NNThroughputBenchmark implements Tool {
       int nrFiles = (int)Math.ceil((double)nrBlocks / blocksPerFile);
      datanodes = new TinyDatanode[nrDatanodes];
       // create data-nodes
-      String prevDNName = "";
       for(int idx=0; idx < nrDatanodes; idx++) {
         datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
         datanodes[idx].register();
-        assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0
-            : "Data-nodes must be sorted lexicographically.";
         datanodes[idx].sendHeartbeat();
-        prevDNName = datanodes[idx].getXferAddr();
       }
 
       // create files
@@ -1184,7 +1180,7 @@ public class NNThroughputBenchmark implements Tool {
             prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
-          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
+          int dnIdx = dnInfo.getXferPort() - 1;
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
               loc.getBlock().getLocalBlock(),