hadoop git commit: HDFS-8366. Erasure Coding: Make the timeout parameter of polling blocking queue configurable in DFSStripedOutputStream. Contributed by Li Bo

2015-05-19 Thread libo
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 12d030bef -> 3cf3398f3


HDFS-8366. Erasure Coding: Make the timeout parameter of polling blocking queue 
configurable in DFSStripedOutputStream. Contributed by Li Bo


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cf3398f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cf3398f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cf3398f

Branch: refs/heads/HDFS-7285
Commit: 3cf3398f3c23b9e9ca421cfa66fdf15081fc86da
Parents: 12d030b
Author: boli2 bo.b...@intel.com
Authored: Tue May 19 02:14:46 2015 -0400
Committer: boli2 bo.b...@intel.com
Committed: Tue May 19 02:14:46 2015 -0400

--
 .../hdfs/client/HdfsClientConfigKeys.java   | 10 
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +++
 .../hadoop/hdfs/DFSStripedOutputStream.java | 19 
 .../hadoop/hdfs/client/impl/DfsClientConf.java  | 24 
 4 files changed, 51 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3398f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 6006d71..9373e98 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -189,6 +189,16 @@ public interface HdfsClientConfigKeys {
 int THREADPOOL_SIZE_DEFAULT = 18;
   }
 
+  /** dfs.client.write.striped configuration properties */
+  interface StripedWrite {
+String PREFIX = Write.PREFIX + "striped.";
+
+String  MAX_SECONDS_GET_STRIPED_BLOCK_KEY = PREFIX + "max-seconds-get-striped-block";
+int MAX_SECONDS_GET_STRIPED_BLOCK_DEFAULT = 90;
+String  MAX_SECONDS_GET_ENDED_BLOCK_KEY = PREFIX + "max-seconds-get-ended-block";
+int MAX_SECONDS_GET_ENDED_BLOCK_DEFAULT = 60;
+  }
+
   /** dfs.http.client configuration properties */
   interface HttpClient {
 String  PREFIX = "dfs.http.client.";
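
For reference, a minimal sketch (not part of the patch) of how a client could override the new timeouts through the standard Configuration API. It assumes Write.PREFIX expands to "dfs.client.write.", so the full property names would be dfs.client.write.striped.max-seconds-get-striped-block and dfs.client.write.striped.max-seconds-get-ended-block:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class StripedWriteTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Raise the striped-block poll timeout from the 90s default to 120s
    // and the ended-block poll timeout from the 60s default to 90s.
    conf.setInt(
        HdfsClientConfigKeys.StripedWrite.MAX_SECONDS_GET_STRIPED_BLOCK_KEY, 120);
    conf.setInt(
        HdfsClientConfigKeys.StripedWrite.MAX_SECONDS_GET_ENDED_BLOCK_KEY, 90);
    // A FileSystem/DFSClient built from this Configuration picks the values
    // up via DfsClientConf when DFSStripedOutputStream polls its queues.
  }
}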

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3398f/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
old mode 100644
new mode 100755
index 3170e9b..939ba89
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -225,3 +225,6 @@
 (Yi Liu via jing9)
 
 HDFS-8320. Erasure coding: consolidate striping-related terminologies. 
(zhz)
+   
+   HDFS-8366. Erasure Coding: Make the timeout parameter of polling 
blocking queue 
+   configurable in DFSStripedOutputStream. (Li Bo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3398f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index b99afab..a648023 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -33,6 +33,8 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -61,11 +63,14 @@ import com.google.common.base.Preconditions;
 public class DFSStripedOutputStream extends DFSOutputStream {
   /** Coordinate the communication between the streamers. */
   static class Coordinator {
+private final DfsClientConf conf;
 private final List<BlockingQueue<ExtendedBlock>> endBlocks;
 private final List<BlockingQueue<LocatedBlock>> stripedBlocks;
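
The Coordinator now polls these per-streamer queues with a timeout taken from DfsClientConf instead of a hard-coded constant. Below is a self-contained sketch of that poll-with-timeout pattern; the helper and its names are illustrative, not the exact code added to DFSStripedOutputStream:

import java.io.IOException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

class TimedQueuePoll {
  // Wait at most timeoutSeconds for an item; fail instead of blocking forever.
  static <T> T pollOrFail(BlockingQueue<T> queue, long timeoutSeconds)
      throws IOException {
    try {
      T item = queue.poll(timeoutSeconds, TimeUnit.SECONDS);
      if (item == null) {
        throw new IOException("Timed out after " + timeoutSeconds
            + "s waiting for a block");
      }
      return item;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while waiting for a block", e);
    }
  }

  public static void main(String[] args) throws IOException {
    BlockingQueue<String> q = new LinkedBlockingQueue<String>();
    q.add("block-0");
    // 90 mirrors the new max-seconds-get-striped-block default above.
    System.out.println(pollOrFail(q, 90));
  }
}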
  

[1/2] hadoop git commit: HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions (Contributed by Rakesh R)

2015-05-19 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5caea4cd4 -> 6dee42f6d
  refs/heads/trunk 0790275f0 -> 93972a332


HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions 
(Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93972a33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93972a33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93972a33

Branch: refs/heads/trunk
Commit: 93972a332a9fc6390447fc5fc9785c98fb4c3344
Parents: 0790275
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 19 12:24:25 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 19 12:24:25 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  5 ++-
 .../hdfs/server/namenode/SecondaryNameNode.java | 33 ++--
 .../hdfs/server/namenode/TestStartup.java   | 18 ++-
 3 files changed, 37 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93972a33/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 35c3b5a..e5fcba2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,4 +1,4 @@
-Hadoop HDFS Change Log
+ Hadoop HDFS Change Log
 
 Trunk (Unreleased)
 
@@ -788,6 +788,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8403. Eliminate retries in TestFileCreation
 #testOverwriteOpenForWrite. (Arpit Agarwal via wheat9)
 
+HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions
+(Rakesh R via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93972a33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index b499e74..0fa1cd5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -667,29 +667,28 @@ public class SecondaryNameNode implements Runnable,
   opts.usage();
   System.exit(0);
 }
-
-StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
-Configuration tconf = new HdfsConfiguration();
-SecondaryNameNode secondary = null;
+
 try {
+  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
+  Configuration tconf = new HdfsConfiguration();
+  SecondaryNameNode secondary = null;
   secondary = new SecondaryNameNode(tconf, opts);
-} catch (IOException ioe) {
-  LOG.fatal("Failed to start secondary namenode", ioe);
-  terminate(1);
-}
 
-if (opts != null && opts.getCommand() != null) {
-  int ret = secondary.processStartupCommand(opts);
-  terminate(ret);
-}
+  if (opts != null && opts.getCommand() != null) {
+int ret = secondary.processStartupCommand(opts);
+terminate(ret);
+  }
 
-if (secondary != null) {
-  secondary.startCheckpointThread();
-  secondary.join();
+  if (secondary != null) {
+secondary.startCheckpointThread();
+secondary.join();
+  }
+} catch (Throwable e) {
+  LOG.fatal("Failed to start secondary namenode", e);
+  terminate(1);
 }
   }
-  
-  
+
   public void startCheckpointThread() {
 Preconditions.checkState(checkpointThread == null,
 "Should not already have a thread");
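
The essence of the fix: the old code only caught IOException around construction, so a RuntimeException thrown there or from the later startup calls escaped main() and left a non-functional process running. Moving all startup work into one try block and catching Throwable guarantees the daemon either starts or terminates. A stripped-down sketch of the pattern (System.exit stands in for Hadoop's terminate() helper used in the real code):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class DaemonStartupSketch {
  private static final Log LOG = LogFactory.getLog(DaemonStartupSketch.class);

  public static void main(String[] args) {
    try {
      // construct the service, process startup commands, start worker threads
      riskyStartup();
    } catch (Throwable t) {            // not just IOException
      LOG.fatal("Failed to start daemon", t);
      System.exit(1);                  // SecondaryNameNode calls terminate(1) here
    }
  }

  private static void riskyStartup() {
    throw new IllegalStateException("simulated runtime failure during startup");
  }
}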

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93972a33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 01621ad..4d3cb75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -60,6 +60,8 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import 

[2/2] hadoop git commit: HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions (Contributed by Rakesh R)

2015-05-19 Thread vinayakumarb
HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions 
(Contributed by Rakesh R)

(cherry picked from commit 93972a332a9fc6390447fc5fc9785c98fb4c3344)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6dee42f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6dee42f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6dee42f6

Branch: refs/heads/branch-2
Commit: 6dee42f6dba12106b7218d068b8eaa5df8739b36
Parents: 5caea4c
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 19 12:24:25 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 19 12:25:00 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  5 ++-
 .../hdfs/server/namenode/SecondaryNameNode.java | 33 ++--
 .../hdfs/server/namenode/TestStartup.java   | 18 ++-
 3 files changed, 37 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dee42f6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 36c3fe0..07bbd36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,4 +1,4 @@
-Hadoop HDFS Change Log
+ Hadoop HDFS Change Log
 
 Release 2.8.0 - UNRELEASED
 
@@ -454,6 +454,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8403. Eliminate retries in TestFileCreation
 #testOverwriteOpenForWrite. (Arpit Agarwal via wheat9)
 
+HDFS-6348. SecondaryNameNode not terminating properly on runtime exceptions
+(Rakesh R via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dee42f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index b499e74..0fa1cd5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -667,29 +667,28 @@ public class SecondaryNameNode implements Runnable,
   opts.usage();
   System.exit(0);
 }
-
-StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
-Configuration tconf = new HdfsConfiguration();
-SecondaryNameNode secondary = null;
+
 try {
+  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
+  Configuration tconf = new HdfsConfiguration();
+  SecondaryNameNode secondary = null;
   secondary = new SecondaryNameNode(tconf, opts);
-} catch (IOException ioe) {
-  LOG.fatal("Failed to start secondary namenode", ioe);
-  terminate(1);
-}
 
-if (opts != null && opts.getCommand() != null) {
-  int ret = secondary.processStartupCommand(opts);
-  terminate(ret);
-}
+  if (opts != null && opts.getCommand() != null) {
+int ret = secondary.processStartupCommand(opts);
+terminate(ret);
+  }
 
-if (secondary != null) {
-  secondary.startCheckpointThread();
-  secondary.join();
+  if (secondary != null) {
+secondary.startCheckpointThread();
+secondary.join();
+  }
+} catch (Throwable e) {
+  LOG.fatal("Failed to start secondary namenode", e);
+  terminate(1);
 }
   }
-  
-  
+
   public void startCheckpointThread() {
 Preconditions.checkState(checkpointThread == null,
 "Should not already have a thread");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dee42f6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 08fde3e..4b2878e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -62,6 +62,8 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
 import 

hadoop git commit: HDFS-8378. Erasure Coding: Few improvements for the erasure coding worker. Contributed by Rakesh R.

2015-05-19 Thread waltersu4549
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 3cf3398f3 -> 3676277c1


HDFS-8378. Erasure Coding: Few improvements for the erasure coding worker. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3676277c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3676277c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3676277c

Branch: refs/heads/HDFS-7285
Commit: 3676277c15d341be758738d975f426119605835f
Parents: 3cf3398
Author: Walter Su waltersu4...@apache.org
Authored: Tue May 19 14:59:23 2015 +0800
Committer: Walter Su waltersu4...@apache.org
Committed: Tue May 19 14:59:23 2015 +0800

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 ++
 .../hdfs/server/datanode/BPOfferService.java|  1 +
 .../erasurecode/ErasureCodingWorker.java| 59 ++--
 3 files changed, 35 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3676277c/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 939ba89..1e7dbea 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -218,6 +218,8 @@
 
 HDFS-8367. BlockInfoStriped uses EC schema. (Kai Sasaki via Kai Zheng)
 
+HDFS-8352. Erasure Coding: test webhdfs read write stripe file. 
(waltersu4549)
+
 HDFS-8417. Erasure Coding: Pread failed to read data starting from 
not-first stripe.
 (Walter Su via jing9)
 
@@ -228,3 +230,6 @@

HDFS-8366. Erasure Coding: Make the timeout parameter of polling 
blocking queue 
configurable in DFSStripedOutputStream. (Li Bo)
+
+HDFS-8378. Erasure Coding: Few improvements for the erasure coding worker.
+(Rakesh R via waltersu4549)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3676277c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 6606d0b..d77b36d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -728,6 +728,7 @@ class BPOfferService {
   LOG.info("DatanodeCommand action: DNA_ERASURE_CODING_RECOVERY");
   Collection<BlockECRecoveryInfo> ecTasks = ((BlockECRecoveryCommand) 
cmd).getECTasks();
   dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
+  break;
 default:
   LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
 }
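
Part of the cleanup is the one-line break above: without it, a handled DNA_ERASURE_CODING_RECOVERY command falls through into the default branch and logs a spurious "Unknown DatanodeCommand action" warning. A self-contained illustration (not BPOfferService itself) of that fall-through hazard:

public class SwitchFallThrough {
  enum Action { RECOVERY, OTHER }

  static void dispatch(Action a, boolean withBreak) {
    switch (a) {
      case RECOVERY:
        System.out.println("handled RECOVERY");
        if (withBreak) {
          break;                       // the line HDFS-8378 adds
        }
        // falls through when withBreak == false
      default:
        System.out.println("WARN: unknown action " + a);
    }
  }

  public static void main(String[] args) {
    dispatch(Action.RECOVERY, false);  // handled RECOVERY + spurious warning
    dispatch(Action.RECOVERY, true);   // handled RECOVERY only
  }
}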

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3676277c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index a1c0f72..4723e9f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -62,7 +62,6 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import 
org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
@@ -88,12 +87,12 @@ import com.google.common.base.Preconditions;
  * commands.
  */
 public final class ErasureCodingWorker {
-  private final Log LOG = DataNode.LOG;
+  private static final Log LOG = DataNode.LOG;
   
   private final DataNode datanode; 
-  private Configuration conf;
+  private final Configuration conf;
 
-  private 

[2/3] hadoop git commit: HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via aw) (missed file)

2015-05-19 Thread aajisaka
HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via 
aw) (missed file)

(cherry picked from commit 576459801c4e21effc4e3bca796527896b6e4f4b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df6d242e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df6d242e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df6d242e

Branch: refs/heads/branch-2
Commit: df6d242e2866311fe1e20dffc6d6077003978228
Parents: 3d0e2e5
Author: Allen Wittenauer a...@apache.org
Authored: Mon Feb 9 12:54:25 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue May 19 17:39:12 2015 +0900

--
 .../java/org/apache/hadoop/fs/shell/TestLs.java | 1308 ++
 1 file changed, 1308 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df6d242e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
new file mode 100644
index 000..66403db
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestLs.java
@@ -0,0 +1,1308 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.URI;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.LinkedList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FilterFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.InOrder;
+
+/**
+ * JUnit test class for {@link org.apache.hadoop.fs.shell.Ls}
+ *
+ */
+public class TestLs {
+  private static Configuration conf;
+  private static FileSystem mockFs;
+
+  private static final Date NOW = new Date();
+
+  @BeforeClass
+  public static void setup() throws IOException {
+conf = new Configuration();
+conf.set("fs.defaultFS", "mockfs:///");
+conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+mockFs = mock(FileSystem.class);
+  }
+
+  @Before
+  public void resetMock() throws IOException {
+reset(mockFs);
+AclStatus mockAclStatus = mock(AclStatus.class);
+when(mockAclStatus.getEntries()).thenReturn(new ArrayList<AclEntry>());
+when(mockFs.getAclStatus(any(Path.class))).thenReturn(mockAclStatus);
+  }
+
+  // check that default options are correct
+  @Test
+  public void processOptionsNone() throws IOException {
+LinkedList<String> options = new LinkedList<String>();
+Ls ls = new Ls();
+ls.processOptions(options);
+assertTrue(ls.isDirRecurse());
+assertFalse(ls.isHumanReadable());
+assertFalse(ls.isRecursive());
+assertFalse(ls.isOrderReverse());
+assertFalse(ls.isOrderSize());
+assertFalse(ls.isOrderTime());
+assertFalse(ls.isUseAtime());
+  }
+
+  // check the -d option is recognised
+  @Test
+  public void processOptionsDirectory() throws IOException {
+LinkedList<String> options = new LinkedList<String>();
+options.add("-d");
+Ls ls = new Ls();
+ls.processOptions(options);
+assertFalse(ls.isDirRecurse());
+assertFalse(ls.isHumanReadable());
+assertFalse(ls.isRecursive());
+assertFalse(ls.isOrderReverse());
+assertFalse(ls.isOrderSize());
+assertFalse(ls.isOrderTime());
+assertFalse(ls.isUseAtime());
+  }
+
+  // check the -h option is 

[1/3] hadoop git commit: HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via aw)

2015-05-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6dee42f6d -> fb49967e9


HADOOP-8934. Shell command ls should include sort options (Jonathan Allen via 
aw)

(cherry picked from commit 30b797ee9df30260314eeadffc7d51492871b352)

Conflicts:
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d0e2e54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d0e2e54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d0e2e54

Branch: refs/heads/branch-2
Commit: 3d0e2e54a19937996aecdb2773d6fcc919e591e3
Parents: 6dee42f
Author: Allen Wittenauer a...@apache.org
Authored: Mon Feb 9 12:50:44 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue May 19 17:38:22 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/fs/shell/Ls.java | 187 ---
 .../src/test/resources/testConf.xml |  34 +++-
 .../src/test/resources/testHDFSConf.xml | 148 +++
 4 files changed, 343 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d0e2e54/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3205a4a..93304dc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -19,6 +19,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)
 
+HADOOP-8934. Shell command ls should include sort options (Jonathan Allen
+via aw)
+
   IMPROVEMENTS
 
 HADOOP-6842. hadoop fs -text does not give a useful text representation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d0e2e54/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index c7e80b6..0e46700 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
 import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Comparator;
 import java.util.Date;
 import java.util.LinkedList;
 import org.apache.hadoop.util.StringUtils;
@@ -40,29 +42,59 @@ class Ls extends FsCommand {
 factory.addClass(Ls.class, "-ls");
 factory.addClass(Lsr.class, "-lsr");
   }
-  
+
+  private static final String OPTION_DIRECTORY = "d";
+  private static final String OPTION_HUMAN = "h";
+  private static final String OPTION_RECURSIVE = "R";
+  private static final String OPTION_REVERSE = "r";
+  private static final String OPTION_MTIME = "t";
+  private static final String OPTION_ATIME = "u";
+  private static final String OPTION_SIZE = "S";
+
   public static final String NAME = "ls";
-  public static final String USAGE = "[-d] [-h] [-R] [<path> ...]";
+  public static final String USAGE = "[-" + OPTION_DIRECTORY + "] [-"
+      + OPTION_HUMAN + "] " + "[-" + OPTION_RECURSIVE + "] [-" + OPTION_MTIME
+      + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] " + "[-"
+      + OPTION_ATIME + "] [<path> ...]";
+
   public static final String DESCRIPTION =
-      "List the contents that match the specified file pattern. If " +
-      "path is not specified, the contents of /user/<currentUser> " +
-      "will be listed. Directory entries are of the form:\n" +
-      "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" +
-      "and file entries are of the form:\n" +
-      "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n" +
-      "-d:  Directories are listed as plain files.\n" +
-      "-h:  Formats the sizes of files in a human-readable fashion " +
-      "rather than a number of bytes.\n" +
-      "-R:  Recursively list the contents of directories.";
- 
-  
-
-  protected final SimpleDateFormat dateFormat =
+      "List the contents that match the specified file pattern. If " +
+      "path is not specified, the contents of /user/<currentUser> " +
+      "will be listed. For a directory a list of its direct children " +
+  is 
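
A usage sketch (not from the patch) that drives the new sort options programmatically; on the command line the equivalent is `hadoop fs -ls -t -r /tmp`. The flags map to the constants above: -t sorts by modification time, -S by size, -u switches to access time, -r reverses the order:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class LsSortExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // List /tmp sorted by modification time (-t), order reversed (-r).
    int rc = ToolRunner.run(conf, new FsShell(conf),
        new String[] { "-ls", "-t", "-r", "/tmp" });
    System.exit(rc);
  }
}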

hadoop git commit: HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only. Contributed by Kengo Seki.

2015-05-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 93972a332 -> 3b50dcdce


HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only. 
Contributed by Kengo Seki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b50dcdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b50dcdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b50dcdc

Branch: refs/heads/trunk
Commit: 3b50dcdce4ffe3d4e5892fca84909ff22be28739
Parents: 93972a3
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue May 19 17:25:27 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue May 19 17:25:27 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../java/org/apache/hadoop/fs/shell/Ls.java | 36 +--
 .../src/site/markdown/FileSystemShell.md|  3 +-
 .../java/org/apache/hadoop/fs/shell/TestLs.java | 67 
 .../src/test/resources/testConf.xml |  6 +-
 5 files changed, 106 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b50dcdc/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf09c5f..8ce77b6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -488,6 +488,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)
 
+HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only.
+(Kengo Seki via aajisaka)
+
   IMPROVEMENTS
 
 HADOOP-6842. hadoop fs -text does not give a useful text representation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b50dcdc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 8ef6d5e..d5c52ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -43,6 +43,7 @@ class Ls extends FsCommand {
 factory.addClass(Lsr.class, "-lsr");
   }
 
+  private static final String OPTION_PATHONLY = "C";
   private static final String OPTION_DIRECTORY = "d";
   private static final String OPTION_HUMAN = "h";
   private static final String OPTION_RECURSIVE = "R";
@@ -52,10 +53,10 @@ class Ls extends FsCommand {
   private static final String OPTION_SIZE = "S";
 
   public static final String NAME = "ls";
-  public static final String USAGE = "[-" + OPTION_DIRECTORY + "] [-"
-      + OPTION_HUMAN + "] " + "[-" + OPTION_RECURSIVE + "] [-" + OPTION_MTIME
-      + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] " + "[-"
-      + OPTION_ATIME + "] [<path> ...]";
+  public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-"
+      + OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" + OPTION_RECURSIVE
+      + "] [-" + OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE
+      + "] [-" + OPTION_ATIME + "] [<path> ...]";
 
   public static final String DESCRIPTION =
       "List the contents that match the specified file pattern. If " +
@@ -67,6 +68,8 @@ class Ls extends FsCommand {
       "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" +
       "and file entries are of the form:\n" +
       "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n\n" +
+      "  -" + OPTION_PATHONLY +
+      "  Display the paths of files and directories only.\n" +
       "  -" + OPTION_DIRECTORY +
       "  Directories are listed as plain files.\n" +
       "  -" + OPTION_HUMAN +
@@ -89,6 +92,7 @@ class Ls extends FsCommand {
 
   protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;
   protected String lineFormat;
+  private boolean pathOnly;
   protected boolean dirRecurse;
   private boolean orderReverse;
   private boolean orderTime;
@@ -107,10 +111,11 @@ class Ls extends FsCommand {
   @Override
   protected void processOptions(LinkedList<String> args)
   throws IOException {
-CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
+CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, OPTION_PATHONLY,
 OPTION_DIRECTORY, OPTION_HUMAN, OPTION_RECURSIVE, OPTION_REVERSE,
 OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
 cf.parse(args);
+pathOnly = cf.getOpt(OPTION_PATHONLY);
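
In effect, `hadoop fs -ls -C /dir` drops the permission/replica/owner/size/date columns and prints one path per line. A rough sketch (not the Ls implementation itself) of what that output reduces to:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathOnlyListing {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    for (FileStatus stat : fs.listStatus(new Path("/tmp"))) {
      // with -C, only the path is printed, nothing else
      System.out.println(stat.getPath().toUri().getPath());
    }
  }
}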
 

[3/3] hadoop git commit: HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only. Contributed by Kengo Seki.

2015-05-19 Thread aajisaka
HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only. 
Contributed by Kengo Seki.

(cherry picked from commit 3b50dcdce4ffe3d4e5892fca84909ff22be28739)

Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb49967e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb49967e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb49967e

Branch: refs/heads/branch-2
Commit: fb49967e97afa771fe5bd95085d5d92ea2a8a484
Parents: df6d242
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue May 19 17:25:27 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue May 19 17:58:52 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../java/org/apache/hadoop/fs/shell/Ls.java | 36 +--
 .../src/site/markdown/FileSystemShell.md|  3 +-
 .../java/org/apache/hadoop/fs/shell/TestLs.java | 67 
 .../src/test/resources/testConf.xml |  6 +-
 5 files changed, 106 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb49967e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 93304dc..4057c4e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -22,6 +22,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-8934. Shell command ls should include sort options (Jonathan Allen
 via aw)
 
+HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only.
+(Kengo Seki via aajisaka)
+
   IMPROVEMENTS
 
 HADOOP-6842. hadoop fs -text does not give a useful text representation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb49967e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 0e46700..171d221 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -43,6 +43,7 @@ class Ls extends FsCommand {
 factory.addClass(Lsr.class, "-lsr");
   }
 
+  private static final String OPTION_PATHONLY = "C";
   private static final String OPTION_DIRECTORY = "d";
   private static final String OPTION_HUMAN = "h";
   private static final String OPTION_RECURSIVE = "R";
@@ -52,10 +53,10 @@ class Ls extends FsCommand {
   private static final String OPTION_SIZE = "S";
 
   public static final String NAME = "ls";
-  public static final String USAGE = "[-" + OPTION_DIRECTORY + "] [-"
-      + OPTION_HUMAN + "] " + "[-" + OPTION_RECURSIVE + "] [-" + OPTION_MTIME
-      + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] " + "[-"
-      + OPTION_ATIME + "] [<path> ...]";
+  public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-"
+      + OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" + OPTION_RECURSIVE
+      + "] [-" + OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE
+      + "] [-" + OPTION_ATIME + "] [<path> ...]";
 
   public static final String DESCRIPTION =
       "List the contents that match the specified file pattern. If " +
@@ -67,6 +68,8 @@ class Ls extends FsCommand {
       "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" +
       "and file entries are of the form:\n" +
       "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n\n" +
+      "  -" + OPTION_PATHONLY +
+      "  Display the paths of files and directories only.\n" +
       "  -" + OPTION_DIRECTORY +
       "  Directories are listed as plain files.\n" +
       "  -" + OPTION_HUMAN +
@@ -89,6 +92,7 @@ class Ls extends FsCommand {
 
   protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;
   protected String lineFormat;
+  private boolean pathOnly;
   protected boolean dirRecurse;
   private boolean orderReverse;
   private boolean orderTime;
@@ -107,10 +111,11 @@ class Ls extends FsCommand {
   @Override
   protected void processOptions(LinkedList<String> args)
   throws IOException {
-CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
+CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, OPTION_PATHONLY,
 OPTION_DIRECTORY, OPTION_HUMAN, OPTION_RECURSIVE, OPTION_REVERSE,
 OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
 

hadoop git commit: HADOOP-11581. Multithreaded correctness Warnings #org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)

2015-05-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5bbf157cb -> 6c4aa89e5


HADOOP-11581. Multithreaded correctness Warnings #org.apache.hadoop.fs.shell.Ls 
(Brahma Reddy Battula via aw)

(cherry picked from commit b015fec6011586784fd6f4791a0c1c22e3902046)

Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c4aa89e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c4aa89e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c4aa89e

Branch: refs/heads/branch-2
Commit: 6c4aa89e564edf409d6833ce18557b4c09389a9b
Parents: 5bbf157
Author: Allen Wittenauer a...@apache.org
Authored: Wed Feb 11 07:49:05 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue May 19 18:36:19 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/fs/shell/Ls.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c4aa89e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 40d6c54..f0004e4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -251,6 +251,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-10582. Fix the test case for copying to non-existent dir in
 TestFsShellCopy. (Kousuke Saruta via aajisaka)
 
+HADOOP-11581. Multithreaded correctness Warnings
+#org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c4aa89e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 171d221..d5c52ed 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -87,7 +87,7 @@ class Ls extends FsCommand {
       "  Use time of last access instead of modification for\n" +
       "  display and sorting.";
 
-  protected static final SimpleDateFormat dateFormat =
+  protected final SimpleDateFormat dateFormat =
       new SimpleDateFormat("yyyy-MM-dd HH:mm");
 
   protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;
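
The warning exists because SimpleDateFormat keeps mutable state, so one static instance shared by Ls commands running on different threads can produce corrupted dates; dropping `static` gives each Ls object its own formatter. A ThreadLocal, sketched below, is the other common remedy (a sketch, not the approach taken by the patch):

import java.text.SimpleDateFormat;
import java.util.Date;

public class PerThreadDateFormat {
  private static final ThreadLocal<SimpleDateFormat> FORMAT =
      new ThreadLocal<SimpleDateFormat>() {
        @Override
        protected SimpleDateFormat initialValue() {
          return new SimpleDateFormat("yyyy-MM-dd HH:mm");
        }
      };

  public static void main(String[] args) {
    // Each thread gets its own formatter instance, so no shared mutable state.
    System.out.println(FORMAT.get().format(new Date()));
  }
}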



hadoop git commit: Move HADOOP-8934 in CHANGES.txt from 3.0.0 to 2.8.0.

2015-05-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b50dcdce -> f889a4927


Move HADOOP-8934 in CHANGES.txt from 3.0.0 to 2.8.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f889a492
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f889a492
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f889a492

Branch: refs/heads/trunk
Commit: f889a49271f368e8d37a156fb1c568f6d286e88a
Parents: 3b50dcd
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue May 19 18:01:24 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue May 19 18:02:10 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f889a492/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8ce77b6..ee7d1e3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -38,9 +38,6 @@ Trunk (Unreleased)
 
 HADOOP-11485. Pluggable shell integration (aw)
 
-HADOOP-8934. Shell command ls should include sort options (Jonathan Allen
-via aw)
-
 HADOOP-11554. Expose HadoopKerberosName as a hadoop subcommand (aw)
 
 HADOOP-11565. Add --slaves shell option (aw)
@@ -488,6 +485,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11949. Add user-provided plugins to test-patch (Sean Busbey via aw)
 
+HADOOP-8934. Shell command ls should include sort options (Jonathan Allen
+via aw)
+
 HADOOP-10971. Add -C flag to make `hadoop fs -ls` print filenames only.
 (Kengo Seki via aajisaka)
 



[1/2] hadoop git commit: HADOOP-11103. Clean up RemoteException (Contributed by Sean Busbey)

2015-05-19 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fb49967e9 -> 5bbf157cb
  refs/heads/trunk f889a4927 -> d4a2830b6


HADOOP-11103. Clean up RemoteException (Contributed by Sean Busbey)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4a2830b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4a2830b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4a2830b

Branch: refs/heads/trunk
Commit: d4a2830b63f0819979b592f4ea6ea3abd5885b71
Parents: f889a49
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 19 14:41:05 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 19 14:41:05 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../main/java/org/apache/hadoop/ipc/Client.java |  5 +--
 .../org/apache/hadoop/ipc/RemoteException.java  | 32 +++-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  4 +++
 .../hdfs/server/namenode/ha/TestHASafeMode.java |  3 ++
 5 files changed, 35 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ee7d1e3..8c7c978 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -588,6 +588,8 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-1540. Support file exclusion list in distcp. (Rich Haase via jing9)
 
+HADOOP-11103. Clean up RemoteException (Sean Busbey via vinayakumarb)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 97b715b..f28d8a2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1139,10 +1139,7 @@ public class Client {
   if (erCode == null) {
   LOG.warn("Detailed error code not set by server on rpc error");
   }
-  RemoteException re = 
-  ( (erCode == null) ? 
-  new RemoteException(exceptionClassName, errorMsg) :
-  new RemoteException(exceptionClassName, errorMsg, erCode));
+  RemoteException re = new RemoteException(exceptionClassName, 
errorMsg, erCode);
   if (status == RpcStatusProto.ERROR) {
 calls.remove(callId);
 call.setException(re);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a2830b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
index 7926d86..620e100 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
@@ -25,31 +25,46 @@ import 
org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.Rpc
 import org.xml.sax.Attributes;
 
 public class RemoteException extends IOException {
+  /** this value should not be defined in RpcHeader.proto so that protobuf 
will return a null */
+  private static final int UNSPECIFIED_ERROR = -1;
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;
   private final int errorCode;
 
-  private String className;
+  private final String className;
   
+  /**
+   * @param className wrapped exception, may be null
+   * @param msg may be null
+   */
   public RemoteException(String className, String msg) {
-super(msg);
-this.className = className;
-errorCode = -1;
+this(className, msg, null);
   }
   
+  /**
+   * @param className wrapped exception, may be null
+   * @param msg may be null
+   * @param erCode may be null
+   */
   public RemoteException(String className, String msg, RpcErrorCodeProto 
erCode) {
 super(msg);
 this.className = className;
 if (erCode != null)
   errorCode = erCode.getNumber();
 else 
-  errorCode = -1;
+  errorCode 
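
For context, the caller-side pattern around this class: the server-side exception crosses the wire as a class name plus message, and unwrapRemoteException() re-creates the original exception type locally when it has a String constructor. A small usage sketch, assuming hadoop-common on the classpath:

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionUnwrap {
  public static void main(String[] args) {
    RemoteException re = new RemoteException(
        "java.io.FileNotFoundException", "File does not exist: /no/such/file");
    IOException unwrapped = re.unwrapRemoteException();
    System.out.println(re.getClassName());              // wrapped class name
    System.out.println(unwrapped.getClass().getName()); // java.io.FileNotFoundException
  }
}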

[2/2] hadoop git commit: HADOOP-11103. Clean up RemoteException (Contributed by Sean Busbey)

2015-05-19 Thread vinayakumarb
HADOOP-11103. Clean up RemoteException (Contributed by Sean Busbey)

(cherry picked from commit d4a2830b63f0819979b592f4ea6ea3abd5885b71)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bbf157c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bbf157c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bbf157c

Branch: refs/heads/branch-2
Commit: 5bbf157cb23b3838da641915e6fceb2d5ae00412
Parents: fb49967
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 19 14:41:05 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 19 14:41:34 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../main/java/org/apache/hadoop/ipc/Client.java |  5 +--
 .../org/apache/hadoop/ipc/RemoteException.java  | 32 +++-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  4 +++
 .../hdfs/server/namenode/ha/TestHASafeMode.java |  3 ++
 5 files changed, 35 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bbf157c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4057c4e..40d6c54 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -120,6 +120,8 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-1540. Support file exclusion list in distcp. (Rich Haase via jing9)
 
+HADOOP-11103. Clean up RemoteException (Sean Busbey via vinayakumarb)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bbf157c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 30ccdda..4013bdb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1139,10 +1139,7 @@ public class Client {
   if (erCode == null) {
   LOG.warn("Detailed error code not set by server on rpc error");
   }
-  RemoteException re = 
-  ( (erCode == null) ? 
-  new RemoteException(exceptionClassName, errorMsg) :
-  new RemoteException(exceptionClassName, errorMsg, erCode));
+  RemoteException re = new RemoteException(exceptionClassName, 
errorMsg, erCode);
   if (status == RpcStatusProto.ERROR) {
 calls.remove(callId);
 call.setException(re);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bbf157c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
index 7926d86..620e100 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
@@ -25,31 +25,46 @@ import 
org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.Rpc
 import org.xml.sax.Attributes;
 
 public class RemoteException extends IOException {
+  /** this value should not be defined in RpcHeader.proto so that protobuf 
will return a null */
+  private static final int UNSPECIFIED_ERROR = -1;
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;
   private final int errorCode;
 
-  private String className;
+  private final String className;
   
+  /**
+   * @param className wrapped exception, may be null
+   * @param msg may be null
+   */
   public RemoteException(String className, String msg) {
-super(msg);
-this.className = className;
-errorCode = -1;
+this(className, msg, null);
   }
   
+  /**
+   * @param className wrapped exception, may be null
+   * @param msg may be null
+   * @param erCode may be null
+   */
   public RemoteException(String className, String msg, RpcErrorCodeProto 
erCode) {
 super(msg);
 this.className = className;
 if (erCode != null)
   errorCode = erCode.getNumber();
 else 
-  errorCode = -1;
+  errorCode = UNSPECIFIED_ERROR;
   }
   
+  /**
+   * @return 

hadoop git commit: Move HADOOP-11581 in CHANGES.txt from 3.0.0 to 2.8.0.

2015-05-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk d4a2830b6 -> eb4c9ddeb


Move HADOOP-11581 in CHANGES.txt from 3.0.0 to 2.8.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb4c9dde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb4c9dde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb4c9dde

Branch: refs/heads/trunk
Commit: eb4c9ddeb54e440d637a45d59d7c127ad9bcbaff
Parents: d4a2830
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue May 19 18:38:35 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Tue May 19 18:38:35 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb4c9dde/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8c7c978..baf9a0f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -424,9 +424,6 @@ Trunk (Unreleased)
 HADOOP-11559. Add links to RackAwareness and InterfaceClassification
 to site index (Masatake Iwasaki via aw)
 
-HADOOP-11581. Multithreaded correctness Warnings
-#org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
-
 HADOOP-11580. Remove SingleNodeSetup.md from trunk (aajisaka)
 
 HADOOP-11583. Fix syntax error in SecureMode.md (Masatake Iwasaki via aw)
@@ -713,6 +710,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-10582. Fix the test case for copying to non-existent dir in
 TestFsShellCopy. (Kousuke Saruta via aajisaka)
 
+HADOOP-11581. Multithreaded correctness Warnings
+#org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by Vinod Kumar Vavilapalli.

2015-05-19 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 6b03ec513 -> 6c7840f5b


YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by 
Vinod Kumar Vavilapalli.

(cherry picked from commit 7401e5b5e8060b6b027d714b5ceb641fcfe5b598)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c7840f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c7840f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c7840f5

Branch: refs/heads/branch-2.7
Commit: 6c7840f5b542fb1c38b66154cd8db31c048d6e60
Parents: 6b03ec5
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed May 20 08:53:50 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed May 20 09:02:30 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java   | 1 +
 .../server/resourcemanager/recovery/FileSystemRMStateStore.java   | 2 +-
 3 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c7840f5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fa1b5f8..3c5cf72 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -100,6 +100,9 @@ Release 2.7.1 - UNRELEASED
 
 YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via 
xgong)
 
+YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager.
+(Vinod Kumar Vavilapalli via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c7840f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index e511ff0..a9ebf7d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -230,6 +230,7 @@ public class RMAppManager implements 
EventHandler<RMAppManagerEvent>,
 success = true;
 break;
   default:
+break;
 }
 
 if (success) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c7840f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 0f68365..b83a1c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -98,7 +98,7 @@ public class FileSystemRMStateStore extends RMStateStore {
   private Path dtSequenceNumberPath = null;
   private int fsNumRetries;
   private long fsRetryInterval;
-  private boolean isHDFS;
+  private volatile boolean isHDFS;
 
   @VisibleForTesting
   Path fsWorkingPath;
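
For context on the YARN-3677 diff above: the patch quiets two routine findbugs complaints, a switch without an explicit default branch and a boolean flag read across threads without synchronization. The following is a minimal, self-contained Java sketch of those two patterns, not code from the patch; apart from the isHDFS field name, the class and values are made up.

public class FindbugsPatternsSketch {
  // volatile guarantees that a write by one thread is visible to other threads
  // that read the flag without holding a lock (the FileSystemRMStateStore change above).
  private volatile boolean isHDFS;

  public int classify(int eventType) {
    int result = 0;
    switch (eventType) {
      case 1:
        result = 42;
        break;
      default:
        break; // an explicit, even empty, default branch satisfies the checker (the RMAppManager change above)
    }
    return result;
  }
}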



hadoop git commit: YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use NodeLabel object instead of String. (Naganarasimha G R via wangda)

2015-05-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 12d6c5ce4 -> b37da52a1


YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use NodeLabel 
object instead of String. (Naganarasimha G R via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b37da52a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b37da52a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b37da52a

Branch: refs/heads/trunk
Commit: b37da52a1c4fb3da2bd21bfadc5ec61c5f953a59
Parents: 12d6c5c
Author: Wangda Tan wan...@apache.org
Authored: Tue May 19 16:34:17 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue May 19 16:34:17 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../src/main/proto/yarn_protos.proto|  4 ---
 .../nodelabels/CommonNodeLabelsManager.java |  2 ++
 .../yarn/nodelabels/NodeLabelTestBase.java  | 12 +++
 .../protocolrecords/NodeHeartbeatRequest.java   |  7 ++--
 .../RegisterNodeManagerRequest.java |  7 ++--
 .../impl/pb/NodeHeartbeatRequestPBImpl.java | 34 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java| 35 +++-
 .../yarn_server_common_service_protos.proto |  8 +++--
 .../hadoop/yarn/TestYarnServerApiClasses.java   | 19 ++-
 .../nodemanager/NodeStatusUpdaterImpl.java  | 23 +++--
 .../nodelabels/NodeLabelsProvider.java  |  3 +-
 .../TestNodeStatusUpdaterForLabels.java | 23 +++--
 .../resourcemanager/ResourceTrackerService.java | 18 --
 .../TestResourceTrackerService.java | 25 +++---
 15 files changed, 149 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5a6fb38..ab6f488 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -247,6 +247,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3362. Add node label usage in RM CapacityScheduler web UI.
 (Naganarasimha G R via wangda)
 
+YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use 
+NodeLabel object instead of String. (Naganarasimha G R via wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 4095676..3c4aa52 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -253,10 +253,6 @@ message NodeIdToLabelsProto {
   repeated string nodeLabels = 2;
 }
 
-message StringArrayProto {
-  repeated string elements = 1;
-}
-
 message LabelsToNodeIdsProto {
   optional string nodeLabels = 1;
   repeated NodeIdProto nodeId = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b37da52a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index bf34837..badf4d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -39,6 +39,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
 
+@Private
 public class CommonNodeLabelsManager extends AbstractService {
   protected 

hadoop git commit: YARN-3583. Support of NodeLabel object instead of plain String in YarnClient side. (Sunil G via wangda)

2015-05-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk b37da52a1 -> 563eb1ad2


YARN-3583. Support of NodeLabel object instead of plain String in YarnClient 
side. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/563eb1ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/563eb1ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/563eb1ad

Branch: refs/heads/trunk
Commit: 563eb1ad2ae848a23bbbf32ebfaf107e8fa14e87
Parents: b37da52
Author: Wangda Tan wan...@apache.org
Authored: Tue May 19 16:54:38 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue May 19 16:54:38 2015 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   6 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../GetLabelsToNodesResponse.java   |   7 +-
 .../GetNodesToLabelsResponse.java   |   7 +-
 ..._server_resourcemanager_service_protos.proto |   7 +-
 .../src/main/proto/yarn_protos.proto|   6 +-
 .../src/main/proto/yarn_service_protos.proto|   2 +-
 .../hadoop/yarn/client/api/YarnClient.java  |   8 +-
 .../yarn/client/api/impl/YarnClientImpl.java|   6 +-
 .../yarn/client/api/impl/TestYarnClient.java|  74 +++--
 .../impl/pb/GetLabelsToNodesResponsePBImpl.java |  28 +++--
 .../impl/pb/GetNodesToLabelsResponsePBImpl.java |  58 ++
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java|  18 +--
 .../server/resourcemanager/ClientRMService.java |   6 +-
 .../resourcemanager/TestClientRMService.java| 110 +++
 15 files changed, 226 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 2b7cd5f..90f6876 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -444,19 +444,19 @@ public class ResourceMgrDelegate extends YarnClient {
   }
 
   @Override
-  public Map<NodeId, Set<String>> getNodeToLabels() throws YarnException,
+  public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException,
   IOException {
 return client.getNodeToLabels();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes() throws YarnException,
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException,
   IOException {
 return client.getLabelsToNodes();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels)
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels)
   throws YarnException, IOException {
 return client.getLabelsToNodes(labels);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ab6f488..9ba9fd8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -250,6 +250,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use 
 NodeLabel object instead of String. (Naganarasimha G R via wangda)
 
+YARN-3583. Support of NodeLabel object instead of plain String 
+in YarnClient side. (Sunil G via wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563eb1ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
index f105359..da2be28 100644
--- 
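
To make the YARN-3583 signature changes above concrete, here is a small, hypothetical caller written against the new shapes visible in the ResourceMgrDelegate hunk (Map<NodeId, Set<NodeLabel>> instead of Map<NodeId, Set<String>>). It is a sketch, not part of the patch, and it assumes a reachable ResourceManager addressed through the default Configuration.

import java.util.Map;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class NodeLabelLookupSketch {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new Configuration());
    client.start();
    try {
      // After YARN-3583 the client returns NodeLabel objects, so label
      // attributes travel with the name instead of a bare String.
      Map<NodeId, Set<NodeLabel>> nodeToLabels = client.getNodeToLabels();
      for (Map.Entry<NodeId, Set<NodeLabel>> entry : nodeToLabels.entrySet()) {
        for (NodeLabel label : entry.getValue()) {
          System.out.println(entry.getKey() + " -> " + label.getName());
        }
      }
    } finally {
      client.stop();
    }
  }
}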

hadoop git commit: HDFS-8428. Erasure Coding: Fix the NullPointerException when deleting file. Contributed by Yi Liu.

2015-05-19 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 3c36df154 -> bf3c28a89


HDFS-8428. Erasure Coding: Fix the NullPointerException when deleting file. 
Contributed by Yi Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf3c28a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf3c28a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf3c28a8

Branch: refs/heads/HDFS-7285
Commit: bf3c28a89c68b605579f95d1ea001bf4a44899b1
Parents: 3c36df1
Author: Zhe Zhang zhezh...@cloudera.com
Authored: Tue May 19 17:25:27 2015 -0700
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Tue May 19 17:25:27 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 9 ++---
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java| 2 +-
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3c28a8/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index d6c9dba..48bc9d6 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -59,7 +59,7 @@
 
 HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
 NameNode (vinayakumarb)
-
+
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
 HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks.
@@ -110,7 +110,7 @@
 
 HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to 
 create BlockReader. (szetszwo via Zhe Zhang)
-
+
 HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema
 in FileSystemLinkResolver. (szetszwo via Zhe Zhang)
 
@@ -172,7 +172,7 @@
 
 HDFS-8324. Add trace info to DFSClient#getErasureCodingZoneInfo(..) 
(vinayakumarb via 
 umamahesh)
-
+
 HDFS-7672. Handle write failure for stripping blocks and refactor the
 existing code in DFSStripedOutputStream and StripedDataStreamer.  
(szetszwo)
 
@@ -235,3 +235,6 @@
 (Rakesh R via waltersu4549)
 
 HDFS-8375. Add cellSize as an XAttr to ECZone. ( Vinayakumar B via zhz).
+
+HDFS-8428. Erasure Coding: Fix the NullPointerException when deleting file.
+(Yi Liu via zhz).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3c28a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index de3b50f..f67de65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3389,7 +3389,7 @@ public class BlockManager {
 for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {
   switch (rdbi.getStatus()) {
   case DELETED_BLOCK:
-removeStoredBlock(storageInfo, getStoredBlock(rdbi.getBlock()), node);
+removeStoredBlock(storageInfo, rdbi.getBlock(), node);
 deleted++;
 break;
   case RECEIVED_BLOCK:



hadoop git commit: YARN-3583. Support of NodeLabel object instead of plain String in YarnClient side. (Sunil G via wangda)

2015-05-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d022c5aba -> b5ba6979b


YARN-3583. Support of NodeLabel object instead of plain String in YarnClient 
side. (Sunil G via wangda)

(cherry picked from commit 563eb1ad2ae848a23bbbf32ebfaf107e8fa14e87)
(cherry picked from commit b0d22b0c606fad6b4ab5443c0aed07c829b46726)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5ba6979
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5ba6979
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5ba6979

Branch: refs/heads/branch-2
Commit: b5ba6979b7bfd216166f040ec1d66c425307516c
Parents: d022c5a
Author: Wangda Tan wan...@apache.org
Authored: Tue May 19 16:54:38 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue May 19 17:37:24 2015 -0700

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |   6 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../GetLabelsToNodesResponse.java   |   7 +-
 .../GetNodesToLabelsResponse.java   |   7 +-
 ..._server_resourcemanager_service_protos.proto |   7 +-
 .../src/main/proto/yarn_protos.proto|   6 +-
 .../src/main/proto/yarn_service_protos.proto|   2 +-
 .../hadoop/yarn/client/api/YarnClient.java  |   8 +-
 .../yarn/client/api/impl/YarnClientImpl.java|   6 +-
 .../yarn/client/api/impl/TestYarnClient.java|  74 +++--
 .../impl/pb/GetLabelsToNodesResponsePBImpl.java |  28 +++--
 .../impl/pb/GetNodesToLabelsResponsePBImpl.java |  58 ++
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java|  18 +--
 .../server/resourcemanager/ClientRMService.java |   6 +-
 .../resourcemanager/TestClientRMService.java| 110 +++
 15 files changed, 226 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ba6979/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 2b7cd5f..90f6876 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -444,19 +444,19 @@ public class ResourceMgrDelegate extends YarnClient {
   }
 
   @Override
-  public Map<NodeId, Set<String>> getNodeToLabels() throws YarnException,
+  public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException,
   IOException {
 return client.getNodeToLabels();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes() throws YarnException,
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException,
   IOException {
 return client.getLabelsToNodes();
   }
 
   @Override
-  public Map<String, Set<NodeId>> getLabelsToNodes(Set<String> labels)
+  public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels)
   throws YarnException, IOException {
 return client.getLabelsToNodes(labels);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ba6979/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 67c43fb..b3ccc35 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -205,6 +205,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use 
 NodeLabel object instead of String. (Naganarasimha G R via wangda)
 
+YARN-3583. Support of NodeLabel object instead of plain String 
+in YarnClient side. (Sunil G via wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5ba6979/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetLabelsToNodesResponse.java
index f105359..da2be28 

hadoop git commit: HADOOP-11995. Make jetty version configurable from the maven command line. Contributed by Sriharsha Devineni.

2015-05-19 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b5ba6979b -> ed320da2b


HADOOP-11995. Make jetty version configurable from the maven command line. 
Contributed by Sriharsha Devineni.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed320da2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed320da2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed320da2

Branch: refs/heads/branch-2
Commit: ed320da2ba5b7c32f34a69ab213f35d988a7ecba
Parents: b5ba697
Author: Haohui Mai whe...@apache.org
Authored: Tue May 19 18:28:20 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue May 19 18:28:34 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-project/pom.xml  | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed320da2/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f2a1572..266bca4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -122,6 +122,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11103. Clean up RemoteException (Sean Busbey via vinayakumarb)
 
+HADOOP-11995. Make jetty version configurable from the maven command line.
+(Sriharsha Devineni via wheat9)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed320da2/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 635596e..da84fe2 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -32,7 +32,7 @@
   <properties>
     <failIfNoTests>false</failIfNoTests>
     <maven.test.redirectTestOutputToFile>true</maven.test.redirectTestOutputToFile>
-
+    <jetty.version>6.1.26</jetty.version>
     <test.exclude>_</test.exclude>
     <test.exclude.pattern>_</test.exclude.pattern>
 
@@ -460,7 +460,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
         <exclusions>
           <exclusion>
             <groupId>org.mortbay.jetty</groupId>
@@ -471,7 +471,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty-util</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.tomcat.embed</groupId>
@@ -590,7 +590,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty-servlet-tester</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
       </dependency>
       <dependency>
         <groupId>commons-logging</groupId>



hadoop git commit: HADOOP-11995. Make jetty version configurable from the maven command line. Contributed by Sriharsha Devineni.

2015-05-19 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7401e5b5e -> ce53c8eb0


HADOOP-11995. Make jetty version configurable from the maven command line. 
Contributed by Sriharsha Devineni.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce53c8eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce53c8eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce53c8eb

Branch: refs/heads/trunk
Commit: ce53c8eb0ccc582957ba1f4c0b7938db00f6ca31
Parents: 7401e5b
Author: Haohui Mai whe...@apache.org
Authored: Tue May 19 18:28:20 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue May 19 18:28:20 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-project/pom.xml  | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce53c8eb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3e7cb39..b0b8fb7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -587,6 +587,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11103. Clean up RemoteException (Sean Busbey via vinayakumarb)
 
+HADOOP-11995. Make jetty version configurable from the maven command line.
+(Sriharsha Devineni via wheat9)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce53c8eb/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index f23a2dd..78903fa 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -32,7 +32,7 @@
   <properties>
     <failIfNoTests>false</failIfNoTests>
     <maven.test.redirectTestOutputToFile>true</maven.test.redirectTestOutputToFile>
-
+    <jetty.version>6.1.26</jetty.version>
     <test.exclude>_</test.exclude>
     <test.exclude.pattern>_</test.exclude.pattern>
 
@@ -461,7 +461,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
         <exclusions>
           <exclusion>
             <groupId>org.mortbay.jetty</groupId>
@@ -472,7 +472,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty-util</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
       </dependency>
       <dependency>
         <groupId>org.apache.tomcat.embed</groupId>
@@ -591,7 +591,7 @@
       <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty-servlet-tester</artifactId>
-        <version>6.1.26</version>
+        <version>${jetty.version}</version>
       </dependency>
       <dependency>
         <groupId>commons-logging</groupId>



hadoop git commit: YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use NodeLabel object instead of String. (Naganarasimha G R via wangda)

2015-05-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 511a503aa -> 7b97f049e


YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use NodeLabel 
object instead of String. (Naganarasimha G R via wangda)

(cherry picked from commit b37da52a1c4fb3da2bd21bfadc5ec61c5f953a59)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b97f049
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b97f049
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b97f049

Branch: refs/heads/branch-2
Commit: 7b97f049ee83e1a637dab0e42003b685d3644a29
Parents: 511a503
Author: Wangda Tan wan...@apache.org
Authored: Tue May 19 16:34:17 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Tue May 19 16:35:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../src/main/proto/yarn_protos.proto|  4 ---
 .../nodelabels/CommonNodeLabelsManager.java |  2 ++
 .../yarn/nodelabels/NodeLabelTestBase.java  | 12 +++
 .../protocolrecords/NodeHeartbeatRequest.java   |  7 ++--
 .../RegisterNodeManagerRequest.java |  7 ++--
 .../impl/pb/NodeHeartbeatRequestPBImpl.java | 34 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java| 35 +++-
 .../yarn_server_common_service_protos.proto |  8 +++--
 .../hadoop/yarn/TestYarnServerApiClasses.java   | 19 ++-
 .../nodemanager/NodeStatusUpdaterImpl.java  | 23 +++--
 .../nodelabels/NodeLabelsProvider.java  |  3 +-
 .../TestNodeStatusUpdaterForLabels.java | 23 +++--
 .../resourcemanager/ResourceTrackerService.java | 18 --
 .../TestResourceTrackerService.java | 25 +++---
 15 files changed, 149 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b97f049/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 16cb27b..8a9298a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -202,6 +202,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3362. Add node label usage in RM CapacityScheduler web UI.
 (Naganarasimha G R via wangda)
 
+YARN-3565. NodeHeartbeatRequest/RegisterNodeManagerRequest should use 
+NodeLabel object instead of String. (Naganarasimha G R via wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b97f049/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 4095676..3c4aa52 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -253,10 +253,6 @@ message NodeIdToLabelsProto {
   repeated string nodeLabels = 2;
 }
 
-message StringArrayProto {
-  repeated string elements = 1;
-}
-
 message LabelsToNodeIdsProto {
   optional string nodeLabels = 1;
   repeated NodeIdProto nodeId = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b97f049/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index bf34837..badf4d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -39,6 +39,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
 
+@Private
 

hadoop git commit: YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by Vinod Kumar Vavilapalli.

2015-05-19 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 563eb1ad2 -> 7401e5b5e


YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by 
Vinod Kumar Vavilapalli.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7401e5b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7401e5b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7401e5b5

Branch: refs/heads/trunk
Commit: 7401e5b5e8060b6b027d714b5ceb641fcfe5b598
Parents: 563eb1a
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed May 20 08:53:50 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed May 20 08:59:26 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java   | 1 +
 .../server/resourcemanager/recovery/FileSystemRMStateStore.java   | 2 +-
 3 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7401e5b5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9ba9fd8..4bd4132 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -523,6 +523,9 @@ Release 2.7.1 - UNRELEASED
 
 YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via 
xgong)
 
+YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager.
+(Vinod Kumar Vavilapalli via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7401e5b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7990421..2d9431d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -230,6 +230,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
 success = true;
 break;
   default:
+break;
 }
 
 if (success) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7401e5b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 6920bb5..0d97d6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -100,7 +100,7 @@ public class FileSystemRMStateStore extends RMStateStore {
   private Path dtSequenceNumberPath = null;
   private int fsNumRetries;
   private long fsRetryInterval;
-  private boolean isHDFS;
+  private volatile boolean isHDFS;
 
   @VisibleForTesting
   Path fsWorkingPath;



hadoop git commit: YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by Vinod Kumar Vavilapalli.

2015-05-19 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7b97f049e -> d022c5aba


YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager. Contributed by 
Vinod Kumar Vavilapalli.

(cherry picked from commit 7401e5b5e8060b6b027d714b5ceb641fcfe5b598)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d022c5ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d022c5ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d022c5ab

Branch: refs/heads/branch-2
Commit: d022c5aba852e825911ff37e8b0f160cb9c60d2f
Parents: 7b97f04
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed May 20 08:53:50 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed May 20 09:01:42 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/RMAppManager.java   | 1 +
 .../server/resourcemanager/recovery/FileSystemRMStateStore.java   | 2 +-
 3 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d022c5ab/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8a9298a..67c43fb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -478,6 +478,9 @@ Release 2.7.1 - UNRELEASED
 
 YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via 
xgong)
 
+YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager.
+(Vinod Kumar Vavilapalli via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d022c5ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7990421..2d9431d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -230,6 +230,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
 success = true;
 break;
   default:
+break;
 }
 
 if (success) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d022c5ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 6920bb5..0d97d6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -100,7 +100,7 @@ public class FileSystemRMStateStore extends RMStateStore {
   private Path dtSequenceNumberPath = null;
   private int fsNumRetries;
   private long fsRetryInterval;
-  private boolean isHDFS;
+  private volatile boolean isHDFS;
 
   @VisibleForTesting
   Path fsWorkingPath;



hadoop git commit: HDFS-8323. Bump GenerationStamp for write failure in DFSStripedOutputStream. Contributed by Tsz Wo Nicholas Sze.

2015-05-19 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 bf3c28a89 -> 4dd4aa577


HDFS-8323. Bump GenerationStamp for write failure in DFSStripedOutputStream. 
Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4dd4aa57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4dd4aa57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4dd4aa57

Branch: refs/heads/HDFS-7285
Commit: 4dd4aa5774f9c60ef0e5217875bf2c55c01f4ff9
Parents: bf3c28a
Author: Jing Zhao ji...@apache.org
Authored: Tue May 19 21:19:51 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Tue May 19 21:19:51 2015 -0700

--
 .../hdfs/protocol/LocatedStripedBlock.java  |  12 ++-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedOutputStream.java | 106 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java|  15 ++-
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  67 +---
 .../blockmanagement/DatanodeStorageInfo.java|  15 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  38 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  40 +--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   8 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  69 +++-
 .../server/namenode/TestAddStripedBlocks.java   |  12 ++-
 12 files changed, 258 insertions(+), 129 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dd4aa57/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
index 93a5948..dc5a77f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -31,15 +31,21 @@ import java.util.Arrays;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class LocatedStripedBlock extends LocatedBlock {
+  private static final int[] EMPTY_INDICES = {};
+
   private int[] blockIndices;
 
   public LocatedStripedBlock(ExtendedBlock b, DatanodeInfo[] locs,
   String[] storageIDs, StorageType[] storageTypes, int[] indices,
   long startOffset, boolean corrupt, DatanodeInfo[] cachedLocs) {
 super(b, locs, storageIDs, storageTypes, startOffset, corrupt, cachedLocs);
-assert indices != null && indices.length == locs.length;
-this.blockIndices = new int[indices.length];
-System.arraycopy(indices, 0, blockIndices, 0, indices.length);
+
+if (indices == null) {
+  this.blockIndices = EMPTY_INDICES;
+} else {
+  this.blockIndices = new int[indices.length];
+  System.arraycopy(indices, 0, blockIndices, 0, indices.length);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dd4aa57/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 48bc9d6..b608b10 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -238,3 +238,6 @@
 
 HDFS-8428. Erasure Coding: Fix the NullPointerException when deleting file.
 (Yi Liu via zhz).
+
+HDFS-8323. Bump GenerationStamp for write failure in DFSStripedOutputStream.
+(Tsz Wo Nicholas Sze via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4dd4aa57/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 4399a37..8eed6ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -33,7 +33,6 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
-import 

hadoop git commit: HDFS-8131. Implement a space balanced block placement policy. Contributed by Liu Shaohui.

2015-05-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk eb4c9ddeb -> de30d66b2


HDFS-8131. Implement a space balanced block placement policy. Contributed by 
Liu Shaohui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de30d66b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de30d66b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de30d66b

Branch: refs/heads/trunk
Commit: de30d66b2673d0344346fb985e786247ca682317
Parents: eb4c9dd
Author: Kihwal Lee kih...@apache.org
Authored: Tue May 19 08:04:38 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Tue May 19 08:04:38 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../AvailableSpaceBlockPlacementPolicy.java |  95 +++
 .../BlockPlacementPolicyDefault.java|  11 +-
 .../TestAvailableSpaceBlockPlacementPolicy.java | 167 +++
 5 files changed, 279 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e5fcba2..76888a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -345,6 +345,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7891. A block placement policy with best rack failure tolerance.
 (Walter Su via szetszwo)
 
+HDFS-8131. Implement a space balanced block placement policy (Liu Shaohui
+via kihwal)
+
   IMPROVEMENTS
 
 HDFS-3918. EditLogTailer shouldn't log WARN when other node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c903e76..9c19f91 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -519,7 +519,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY = "dfs.web.authentication.kerberos.keytab";
   public static final String  DFS_NAMENODE_MAX_OP_SIZE_KEY = "dfs.namenode.max.op.size";
   public static final int DFS_NAMENODE_MAX_OP_SIZE_DEFAULT = 50 * 1024 * 1024;
-  
+  public static final String  DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY =
+      "dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction";
+  public static final float   DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT =
+      0.6f;
+
   public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user";
   public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de30d66b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
new file mode 100644
index 000..74c1c78
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR 
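
For readers wondering how the new HDFS-8131 policy is wired up: the diff introduces AvailableSpaceBlockPlacementPolicy plus the balanced-space-preference-fraction key shown in the DFSConfigKeys hunk. The sketch below shows one plausible NameNode-side configuration; the dfs.block.replicator.classname key used to select the policy class is an assumption here (it is not shown in this patch), and the fraction semantics are inferred from the key name and its 0.6f default, so treat this purely as an illustration.

import org.apache.hadoop.conf.Configuration;

public class AvailableSpacePolicyConfigSketch {
  public static Configuration build() {
    Configuration conf = new Configuration();
    // Assumed key for choosing the block placement policy class; not part of this patch.
    conf.set("dfs.block.replicator.classname",
        "org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy");
    // Key and default value (0.6f) taken from the DFSConfigKeys hunk above.
    conf.setFloat(
        "dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction",
        0.6f);
    return conf;
  }
}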

hadoop git commit: HDFS-8131. Implement a space balanced block placement policy. Contributed by Liu Shaohui. (cherry picked from commit de30d66b2673d0344346fb985e786247ca682317)

2015-05-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6c4aa89e5 -> 9e656bfa9


HDFS-8131. Implement a space balanced block placement policy. Contributed by 
Liu Shaohui.
(cherry picked from commit de30d66b2673d0344346fb985e786247ca682317)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e656bfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e656bfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e656bfa

Branch: refs/heads/branch-2
Commit: 9e656bfa94f0d1617046b9ba7646ad15895de5c2
Parents: 6c4aa89
Author: Kihwal Lee kih...@apache.org
Authored: Tue May 19 08:06:07 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Tue May 19 08:06:07 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +-
 .../AvailableSpaceBlockPlacementPolicy.java |  95 +++
 .../BlockPlacementPolicyDefault.java|  11 +-
 .../TestAvailableSpaceBlockPlacementPolicy.java | 167 +++
 5 files changed, 279 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e656bfa/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 07bbd36..3bbfd69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -11,6 +11,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7891. A block placement policy with best rack failure tolerance.
 (Walter Su via szetszwo)
 
+HDFS-8131. Implement a space balanced block placement policy (Liu Shaohui
+via kihwal)
+
   IMPROVEMENTS
 
 HDFS-3918. EditLogTailer shouldn't log WARN when other node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e656bfa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b3e511d..f78c870 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -521,7 +521,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY = "dfs.web.authentication.kerberos.keytab";
   public static final String  DFS_NAMENODE_MAX_OP_SIZE_KEY = "dfs.namenode.max.op.size";
   public static final int DFS_NAMENODE_MAX_OP_SIZE_DEFAULT = 50 * 1024 * 1024;
-  
+  public static final String  DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY =
+      "dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction";
+  public static final float   DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT =
+      0.6f;
+
   public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user";
   public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e656bfa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
new file mode 100644
index 000..74c1c78
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the 

hadoop git commit: YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei Yang

2015-05-19 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9e656bfa9 -> d39039d54


YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei 
Yang

(cherry picked from commit 5009ad4a7f712fc578b461ecec53f7f97eaaed0c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d39039d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d39039d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d39039d5

Branch: refs/heads/branch-2
Commit: d39039d54df7934297f39c11215f2954b8803200
Parents: 9e656bf
Author: Xuan xg...@apache.org
Authored: Tue May 19 09:56:01 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue May 19 09:57:13 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 +
 .../hadoop/yarn/client/TestRMFailover.java  | 78 ++--
 2 files changed, 40 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d39039d5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7ac38ce..eb5c183 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -467,6 +467,8 @@ Release 2.7.1 - UNRELEASED
 YARN-3526. ApplicationMaster tracking URL is incorrectly redirected
 on a QJM cluster. (Weiwei Yang via xgong)
 
+YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via 
xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d39039d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index d4fc5c1..cd22743 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.yarn.client;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
-import java.util.List;
-import java.util.Map;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestRMFailover extends ClientBaseWithFixes {
@@ -275,10 +274,6 @@ public class TestRMFailover extends ClientBaseWithFixes {
 assertEquals(404, response.getResponseCode());
   }
 
-  // ignore this testcase, Always gets too many redirect loops exception
-  // Probably because of the limitation of MiniYARNCluster.
-  // Verified the behavior in a single node cluster.
-  @Ignore
   @Test
   public void testRMWebAppRedirect() throws YarnException,
   InterruptedException, IOException {
@@ -290,59 +285,62 @@ public class TestRMFailover extends ClientBaseWithFixes {
 getAdminService(0).transitionToActive(req);
     String rm1Url = "http://0.0.0.0:18088";
     String rm2Url = "http://0.0.0.0:28088";
-    String header = getHeader("Refresh", rm2Url);
-    assertTrue(header.contains("; url=" + rm1Url));
+    String redirectURL = getRedirectURL(rm2Url);
+    // if uri is null, RMWebAppFilter will append a slash at the trail of the redirection url
+    assertEquals(redirectURL,rm1Url+"/");
 
-    header = getHeader("Refresh", rm2Url + "/metrics");
-    assertTrue(header.contains("; url=" + rm1Url));
+    redirectURL = getRedirectURL(rm2Url + "/metrics");
+    assertEquals(redirectURL,rm1Url + "/metrics");
 
-    header = getHeader("Refresh", rm2Url + "/jmx");
-    assertTrue(header.contains("; url=" + rm1Url));
+    redirectURL = getRedirectURL(rm2Url + "/jmx");
+    assertEquals(redirectURL,rm1Url + "/jmx");
 
     // standby RM links /conf, /stacks, /logLevel, /static, /logs,
     // /cluster/cluster as well as webService
     // /ws/v1/cluster/info should not be redirected to active RM
-    header = getHeader("Refresh", rm2Url + "/cluster/cluster");
-    assertEquals(null, header);
+redirectURL = getRedirectURL(rm2Url + 
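
The reworked test above asserts on redirect URLs through a getRedirectURL helper whose body is cut off in this message. As a purely hypothetical illustration of how a redirect target can be captured without following it (this is not the helper added by the patch), one option looks like:

import java.net.HttpURLConnection;
import java.net.URL;

public final class RedirectProbeSketch {
  // Returns the Location header of a redirect response, or null when the URL
  // does not answer with a 3xx status. Hypothetical helper, not patch code.
  public static String getRedirectTarget(String url) throws Exception {
    HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
    conn.setInstanceFollowRedirects(false); // inspect the redirect instead of following it
    try {
      int code = conn.getResponseCode();
      return (code >= 300 && code < 400) ? conn.getHeaderField("Location") : null;
    } finally {
      conn.disconnect();
    }
  }
}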

hadoop git commit: YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei Yang

2015-05-19 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 59d1b4a32 -> f0399f56e


YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei 
Yang

(cherry picked from commit 5009ad4a7f712fc578b461ecec53f7f97eaaed0c)
(cherry picked from commit d39039d54df7934297f39c11215f2954b8803200)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0399f56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0399f56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0399f56

Branch: refs/heads/branch-2.7
Commit: f0399f56e59c4343cfe106b6e4565d3239e7317c
Parents: 59d1b4a
Author: Xuan xg...@apache.org
Authored: Tue May 19 09:56:01 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue May 19 09:57:44 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 +
 .../hadoop/yarn/client/TestRMFailover.java  | 78 ++--
 2 files changed, 40 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0399f56/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 180fdc5..fa1b5f8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -98,6 +98,8 @@ Release 2.7.1 - UNRELEASED
 YARN-3526. ApplicationMaster tracking URL is incorrectly redirected
 on a QJM cluster. (Weiwei Yang via xgong)
 
+YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via 
xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0399f56/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index d4fc5c1..cd22743 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.yarn.client;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
-import java.util.List;
-import java.util.Map;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestRMFailover extends ClientBaseWithFixes {
@@ -275,10 +274,6 @@ public class TestRMFailover extends ClientBaseWithFixes {
 assertEquals(404, response.getResponseCode());
   }
 
-  // ignore this testcase, Always gets too many redirect loops exception
-  // Probably because of the limitation of MiniYARNCluster.
-  // Verified the behavior in a single node cluster.
-  @Ignore
   @Test
   public void testRMWebAppRedirect() throws YarnException,
   InterruptedException, IOException {
@@ -290,59 +285,62 @@ public class TestRMFailover extends ClientBaseWithFixes {
 getAdminService(0).transitionToActive(req);
 String rm1Url = "http://0.0.0.0:18088";
 String rm2Url = "http://0.0.0.0:28088";
-String header = getHeader("Refresh", rm2Url);
-assertTrue(header.contains("; url=" + rm1Url));
+String redirectURL = getRedirectURL(rm2Url);
+// if uri is null, RMWebAppFilter will append a slash at the trail of the redirection url
+assertEquals(redirectURL,rm1Url+"/");
 
-header = getHeader("Refresh", rm2Url + "/metrics");
-assertTrue(header.contains("; url=" + rm1Url));
+redirectURL = getRedirectURL(rm2Url + "/metrics");
+assertEquals(redirectURL,rm1Url + "/metrics");
 
-header = getHeader("Refresh", rm2Url + "/jmx");
-assertTrue(header.contains("; url=" + rm1Url));
+redirectURL = getRedirectURL(rm2Url + "/jmx");
+assertEquals(redirectURL,rm1Url + "/jmx");
 
 // standby RM links /conf, /stacks, /logLevel, /static, /logs,
 // /cluster/cluster as well as webService
 // /ws/v1/cluster/info should not be redirected to active RM
-header = getHeader("Refresh", rm2Url + "/cluster/cluster");
-

hadoop git commit: YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei Yang

2015-05-19 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk de30d66b2 - 5009ad4a7


YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. Contributed by Weiwei 
Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5009ad4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5009ad4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5009ad4a

Branch: refs/heads/trunk
Commit: 5009ad4a7f712fc578b461ecec53f7f97eaaed0c
Parents: de30d66
Author: Xuan xg...@apache.org
Authored: Tue May 19 09:56:01 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue May 19 09:56:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 +
 .../hadoop/yarn/client/TestRMFailover.java  | 78 ++--
 2 files changed, 40 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5009ad4a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c6f753d..e17e9c7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -509,6 +509,8 @@ Release 2.7.1 - UNRELEASED
 YARN-3526. ApplicationMaster tracking URL is incorrectly redirected
 on a QJM cluster. (Weiwei Yang via xgong)
 
+YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via 
xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5009ad4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index d4fc5c1..cd22743 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -21,13 +21,13 @@ package org.apache.hadoop.yarn.client;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
-import java.util.List;
-import java.util.Map;
+import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestRMFailover extends ClientBaseWithFixes {
@@ -275,10 +274,6 @@ public class TestRMFailover extends ClientBaseWithFixes {
 assertEquals(404, response.getResponseCode());
   }
 
-  // ignore this testcase, Always gets too many redirect loops exception
-  // Probably because of the limitation of MiniYARNCluster.
-  // Verified the behavior in a single node cluster.
-  @Ignore
   @Test
   public void testRMWebAppRedirect() throws YarnException,
   InterruptedException, IOException {
@@ -290,59 +285,62 @@ public class TestRMFailover extends ClientBaseWithFixes {
 getAdminService(0).transitionToActive(req);
 String rm1Url = "http://0.0.0.0:18088";
 String rm2Url = "http://0.0.0.0:28088";
-String header = getHeader("Refresh", rm2Url);
-assertTrue(header.contains("; url=" + rm1Url));
+String redirectURL = getRedirectURL(rm2Url);
+// if uri is null, RMWebAppFilter will append a slash at the trail of the redirection url
+assertEquals(redirectURL,rm1Url+"/");
 
-header = getHeader("Refresh", rm2Url + "/metrics");
-assertTrue(header.contains("; url=" + rm1Url));
+redirectURL = getRedirectURL(rm2Url + "/metrics");
+assertEquals(redirectURL,rm1Url + "/metrics");
 
-header = getHeader("Refresh", rm2Url + "/jmx");
-assertTrue(header.contains("; url=" + rm1Url));
+redirectURL = getRedirectURL(rm2Url + "/jmx");
+assertEquals(redirectURL,rm1Url + "/jmx");
 
 // standby RM links /conf, /stacks, /logLevel, /static, /logs,
 // /cluster/cluster as well as webService
 // /ws/v1/cluster/info should not be redirected to active RM
-header = getHeader("Refresh", rm2Url + "/cluster/cluster");
-assertEquals(null, header);
+redirectURL = getRedirectURL(rm2Url + "/cluster/cluster");
+assertNull(redirectURL);
 
-header = 
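
The hunks above (repeated for each branch in this digest) replace the old Refresh-header check with a getRedirectURL helper that follows the standby RM's HTTP redirect and compares the target URL; the helper's body is cut off here. A minimal sketch of such a helper, with a hypothetical class name and the 302 status check as an assumption rather than the committed implementation:

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class RedirectProbe {
  // Open the standby RM URL without following redirects and return the
  // Location header, or null when the standby serves the page itself.
  static String getRedirectURL(String url) {
    String redirectUrl = null;
    try {
      HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
      conn.setInstanceFollowRedirects(false);
      if (conn.getResponseCode() == HttpURLConnection.HTTP_MOVED_TEMP) {
        redirectUrl = conn.getHeaderField("Location");
      }
      conn.disconnect();
    } catch (IOException e) {
      // treat connection problems as "no redirect observed"
    }
    return redirectUrl;
  }
}

With a helper of this shape, assertEquals(redirectURL, rm1Url + "/metrics") pins down the exact redirect target instead of substring-matching a Refresh header, which is what made the test stable enough to drop the @Ignore.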

hadoop git commit: Moving MAPREDUCE-6361 to 2.7.1 CHANGES.txt

2015-05-19 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8860e352c - 8ca1dfeeb


Moving MAPREDUCE-6361 to 2.7.1 CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ca1dfee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ca1dfee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ca1dfee

Branch: refs/heads/trunk
Commit: 8ca1dfeebb660741aa6e5b137cd1088815b614cf
Parents: 8860e35
Author: Junping Du junping...@apache.org
Authored: Tue May 19 11:48:06 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Tue May 19 11:48:51 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ca1dfee/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 7e34297..10703c6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -415,10 +415,6 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, 
 introducing compile error (Arshad Mohammad via vinayakumarb)
 
-MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
-copySucceeded() in one thread and copyFailed() in another thread on the
-same host. (Junping Du via ozawa)
-
 MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort
 doesn't work. (Takuya Fukudome via ozawa)
 
@@ -465,6 +461,10 @@ Release 2.7.1 - UNRELEASED
 that they don't fail on history-server backed by DFSes with not so strong
 guarantees. (Craig Welch via vinodkv)
 
+MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
+copySucceeded() in one thread and copyFailed() in another thread on the
+same host. (Junping Du via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



hadoop git commit: Moving MAPREDUCE-6361 to 2.7.1 CHANGES.txt

2015-05-19 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9534e533d - dafe33efb


Moving MAPREDUCE-6361 to 2.7.1 CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dafe33ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dafe33ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dafe33ef

Branch: refs/heads/branch-2
Commit: dafe33efbf094cf223d58c9743fcf2f0a5812283
Parents: 9534e53
Author: Junping Du junping...@apache.org
Authored: Tue May 19 11:53:46 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Tue May 19 11:53:46 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dafe33ef/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9d943bd..8505841 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -157,10 +157,6 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, 
 introducing compile error (Arshad Mohammad via vinayakumarb)
 
-MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
-copySucceeded() in one thread and copyFailed() in another thread on the
-same host. (Junping Du via ozawa)
-
 MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort
 doesn't work. (Takuya Fukudome via ozawa)
 
@@ -207,6 +203,10 @@ Release 2.7.1 - UNRELEASED
 that they don't fail on history-server backed by DFSes with not so strong
 guarantees. (Craig Welch via vinodkv)
 
+MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
+copySucceeded() in one thread and copyFailed() in another thread on the
+same host. (Junping Du via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get created with ACLs. (Gregory Chanan via asuresh)

2015-05-19 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8ca1dfeeb - fd3cb533d


HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get 
created with ACLs. (Gregory Chanan via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd3cb533
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd3cb533
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd3cb533

Branch: refs/heads/trunk
Commit: fd3cb533d2495ea220ab2e468835a43a784d7532
Parents: 8ca1dfe
Author: Arun Suresh asur...@apache.org
Authored: Tue May 19 11:35:57 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Tue May 19 11:35:57 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../ZKDelegationTokenSecretManager.java | 12 
 .../TestZKDelegationTokenSecretManager.java | 60 
 3 files changed, 75 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3cb533/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 10da9d7..e4537a3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -748,6 +748,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-11966. Variable cygwin is undefined in hadoop-config.sh when 
executed
 through hadoop-daemon.sh. (cnauroth)
 
+HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get
+created with ACLs. (Gregory Chanan via asuresh)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3cb533/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index 73c3ab8..da0e6ad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -47,6 +47,7 @@ import 
org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
 import org.apache.curator.framework.recipes.shared.SharedCount;
 import org.apache.curator.framework.recipes.shared.VersionedValue;
 import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.utils.EnsurePath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -298,6 +299,17 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
   } catch (Exception e) {
 throw new IOException("Could not start Curator Framework", e);
   }
+} else {
+  // If namespace parents are implicitly created, they won't have ACLs.
+  // So, let's explicitly create them.
+  CuratorFramework nullNsFw = zkClient.usingNamespace(null);
+  EnsurePath ensureNs =
+nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
+  try {
+ensureNs.ensure(nullNsFw.getZookeeperClient());
+  } catch (Exception e) {
+throw new IOException("Could not create namespace", e);
+  }
 }
 listenerThreadPool = Executors.newSingleThreadExecutor();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd3cb533/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
index 6435c0b..185a994 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
@@ -19,9 +19,16 @@
 package org.apache.hadoop.security.token.delegation;
 
 import java.io.IOException;
+import 
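
The else branch added above exists because znodes created implicitly as parents of deeper paths do not pick up the client's ACLs, so the namespace znode is created explicitly through the ACL-aware client before use. A standalone sketch of the same pattern, assuming a Curator client built with an ACLProvider (the buildClient helper and the CREATOR_ALL_ACL choice are illustrative, not the secret manager's actual configuration):

import java.util.List;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.retry.RetryNTimes;
import org.apache.curator.utils.EnsurePath;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;

public class ZkNamespaceWithAcls {
  public static CuratorFramework buildClient(String connectString, String namespace)
      throws Exception {
    ACLProvider aclProvider = new ACLProvider() {
      @Override
      public List<ACL> getDefaultAcl() {
        return ZooDefs.Ids.CREATOR_ALL_ACL;  // illustrative ACL choice
      }
      @Override
      public List<ACL> getAclForPath(String path) {
        return getDefaultAcl();
      }
    };
    CuratorFramework client = CuratorFrameworkFactory.builder()
        .connectString(connectString)
        .namespace(namespace)
        .aclProvider(aclProvider)
        .retryPolicy(new RetryNTimes(3, 1000))
        .build();
    client.start();
    // Same move as the patch: make sure the namespace znode itself is created by
    // the ACL-aware client instead of appearing implicitly without ACLs.
    CuratorFramework nullNsClient = client.usingNamespace(null);
    EnsurePath ensureNs =
        nullNsClient.newNamespaceAwareEnsurePath("/" + client.getNamespace());
    ensureNs.ensure(nullNsClient.getZookeeperClient());
    return client;
  }
}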

hadoop git commit: HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get created with ACLs. (Gregory Chanan via asuresh)

2015-05-19 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 dafe33efb - 00e2a0a49


HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get 
created with ACLs. (Gregory Chanan via asuresh)

(cherry picked from commit fd3cb533d2495ea220ab2e468835a43a784d7532)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00e2a0a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00e2a0a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00e2a0a4

Branch: refs/heads/branch-2
Commit: 00e2a0a4945e5e9d31d6655845384e76c01cdd0a
Parents: dafe33e
Author: Arun Suresh asur...@apache.org
Authored: Tue May 19 11:35:57 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Tue May 19 11:37:15 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../ZKDelegationTokenSecretManager.java | 12 
 .../TestZKDelegationTokenSecretManager.java | 60 
 3 files changed, 75 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00e2a0a4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index cd744f9..b201d9a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -292,6 +292,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-11966. Variable cygwin is undefined in hadoop-config.sh when 
executed
 through hadoop-daemon.sh. (cnauroth)
 
+HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get
+created with ACLs. (Gregory Chanan via asuresh)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00e2a0a4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index 73c3ab8..da0e6ad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -47,6 +47,7 @@ import 
org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
 import org.apache.curator.framework.recipes.shared.SharedCount;
 import org.apache.curator.framework.recipes.shared.VersionedValue;
 import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.utils.EnsurePath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -298,6 +299,17 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
   } catch (Exception e) {
 throw new IOException("Could not start Curator Framework", e);
   }
+} else {
+  // If namespace parents are implicitly created, they won't have ACLs.
+  // So, let's explicitly create them.
+  CuratorFramework nullNsFw = zkClient.usingNamespace(null);
+  EnsurePath ensureNs =
+nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
+  try {
+ensureNs.ensure(nullNsFw.getZookeeperClient());
+  } catch (Exception e) {
+throw new IOException("Could not create namespace", e);
+  }
 }
 listenerThreadPool = Executors.newSingleThreadExecutor();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00e2a0a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
index 6435c0b..185a994 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
@@ -19,9 +19,16 @@
 package 

hadoop git commit: HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get created with ACLs. (Gregory Chanan via asuresh)

2015-05-19 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7ac66f871 - 273d2f975


HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get 
created with ACLs. (Gregory Chanan via asuresh)

(cherry picked from commit fd3cb533d2495ea220ab2e468835a43a784d7532)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/273d2f97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/273d2f97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/273d2f97

Branch: refs/heads/branch-2.7
Commit: 273d2f975340e981d931e593bf68ffaf0bdc0e16
Parents: 7ac66f8
Author: Arun Suresh asur...@apache.org
Authored: Tue May 19 11:35:57 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Tue May 19 11:37:57 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../ZKDelegationTokenSecretManager.java | 12 
 .../TestZKDelegationTokenSecretManager.java | 60 
 3 files changed, 75 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/273d2f97/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f63a69a..7c787be 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-11966. Variable cygwin is undefined in hadoop-config.sh when 
executed
 through hadoop-daemon.sh. (cnauroth)
 
+HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get
+created with ACLs. (Gregory Chanan via asuresh)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/273d2f97/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index ec522dcf..035e594 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -47,6 +47,7 @@ import 
org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
 import org.apache.curator.framework.recipes.shared.SharedCount;
 import org.apache.curator.framework.recipes.shared.VersionedValue;
 import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.utils.EnsurePath;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -297,6 +298,17 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
   } catch (Exception e) {
 throw new IOException("Could not start Curator Framework", e);
   }
+} else {
+  // If namespace parents are implicitly created, they won't have ACLs.
+  // So, let's explicitly create them.
+  CuratorFramework nullNsFw = zkClient.usingNamespace(null);
+  EnsurePath ensureNs =
+nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
+  try {
+ensureNs.ensure(nullNsFw.getZookeeperClient());
+  } catch (Exception e) {
+throw new IOException("Could not create namespace", e);
+  }
 }
 listenerThreadPool = Executors.newSingleThreadExecutor();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/273d2f97/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
index 6435c0b..185a994 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
@@ -19,9 +19,16 @@
 package 

hadoop git commit: MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between copySucceeded() in one thread and copyFailed() in another thread on the same host. Contributed by Junping Du.

2015-05-19 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 273d2f975 - 6b03ec513


MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between 
copySucceeded() in one thread and copyFailed() in another thread on the same 
host. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b03ec51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b03ec51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b03ec51

Branch: refs/heads/branch-2.7
Commit: 6b03ec51373b175fa44e5d5fb5fae3c64eb620d9
Parents: 273d2f9
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed May 13 00:28:17 2015 +0900
Committer: Junping Du junping...@apache.org
Committed: Tue May 19 12:01:14 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  4 ++
 .../task/reduce/ShuffleSchedulerImpl.java   | 14 +++-
 .../task/reduce/TestShuffleScheduler.java   | 70 
 3 files changed, 85 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b03ec51/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a8cbb8f..710018f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -36,6 +36,10 @@ Release 2.7.1 - UNRELEASED
 that they don't fail on history-server backed by DFSes with not so strong
 guarantees. (Craig Welch via vinodkv)
 
+MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
+copySucceeded() in one thread and copyFailed() in another thread on the
+same host. (Junping Du via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b03ec51/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
index 37f4af3..cce36de 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
@@ -239,7 +239,7 @@ public class ShuffleSchedulerImpl<K,V> implements ShuffleScheduler<K,V> {
   }
   
   private void updateStatus() {
-updateStatus(null);
+updateStatus(null);
   }
 
   public synchronized void hostFailed(String hostname) {
@@ -263,9 +263,17 @@ public class ShuffleSchedulerImpl<K,V> implements ShuffleScheduler<K,V> {
   failureCounts.put(mapId, new IntWritable(1));
 }
 String hostname = host.getHostName();
+IntWritable hostFailedNum = hostFailures.get(hostname);
+// MAPREDUCE-6361: hostname could get cleanup from hostFailures in another
+// thread with copySucceeded.
+// In this case, add back hostname to hostFailures to get rid of NPE issue.
+if (hostFailedNum == null) {
+  hostFailures.put(hostname, new IntWritable(1));
+}
 //report failure if already retried maxHostFailures times
-boolean hostFail = hostFailures.get(hostname).get() > getMaxHostFailures() ? true : false;
-
+boolean hostFail = hostFailures.get(hostname).get() >
+getMaxHostFailures() ? true : false;
+
 if (failures >= abortFailureLimit) {
   try {
 throw new IOException(failures + " failures downloading " + mapId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b03ec51/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
index 6ac2320..654b748 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
+++ 
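
The ShuffleSchedulerImpl hunk above re-seeds hostFailures when the entry has been removed by copySucceeded() on another thread, instead of dereferencing a null IntWritable. A standalone sketch of the same defensive pattern, with hypothetical class and method names rather than the MapReduce scheduler's own:

import java.util.HashMap;
import java.util.Map;

public class HostFailureTracker {
  private final Map<String, Integer> hostFailures = new HashMap<>();

  public synchronized void copySucceeded(String hostname) {
    // a successful copy wipes the failure history for the host
    hostFailures.remove(hostname);
  }

  public synchronized boolean copyFailed(String hostname, int maxHostFailures) {
    Integer failures = hostFailures.get(hostname);
    if (failures == null) {
      // the entry may have been cleared between the failure being detected and
      // being recorded; re-seed it instead of hitting the NPE fixed here
      failures = 0;
    }
    failures = failures + 1;
    hostFailures.put(hostname, failures);
    // report the host as failed once it exceeds the allowed failure count
    return failures > maxHostFailures;
  }
}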

[1/2] hadoop git commit: HADOOP-11963. Metrics documentation for FSNamesystem misspells PendingDataNodeMessageCount. Contributed by Anu Engineer.

2015-05-19 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 00e2a0a49 - 7af9a78fe
  refs/heads/trunk fd3cb533d - e422e76fc


HADOOP-11963. Metrics documentation for FSNamesystem misspells 
PendingDataNodeMessageCount. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e422e76f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e422e76f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e422e76f

Branch: refs/heads/trunk
Commit: e422e76fcaaa04fc22384d978a2abae967d801b6
Parents: fd3cb53
Author: cnauroth cnaur...@apache.org
Authored: Tue May 19 11:50:27 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue May 19 11:50:27 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e422e76f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e4537a3..4621f80 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -716,6 +716,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11581. Multithreaded correctness Warnings
 #org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
 
+HADOOP-11963. Metrics documentation for FSNamesystem misspells
+PendingDataNodeMessageCount. (Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e422e76f/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 4a10a00..ca89745 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -225,7 +225,7 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `PendingDeletionBlocks` | Current number of blocks pending deletion |
 | `ExcessBlocks` | Current number of excess blocks |
 | `PostponedMisreplicatedBlocks` | (HA-only) Current number of blocks 
postponed to replicate |
-| `PendingDataNodeMessageCourt` | (HA-only) Current number of pending 
block-related messages for later processing in the standby NameNode |
+| `PendingDataNodeMessageCount` | (HA-only) Current number of pending 
block-related messages for later processing in the standby NameNode |
 | `MillisSinceLastLoadedEdits` | (HA-only) Time in milliseconds since the last 
time standby NameNode load edit log. In active NameNode, set to 0 |
 | `BlockCapacity` | Current number of block capacity |
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |



[2/2] hadoop git commit: HADOOP-11963. Metrics documentation for FSNamesystem misspells PendingDataNodeMessageCount. Contributed by Anu Engineer.

2015-05-19 Thread cnauroth
HADOOP-11963. Metrics documentation for FSNamesystem misspells 
PendingDataNodeMessageCount. Contributed by Anu Engineer.

(cherry picked from commit e422e76fcaaa04fc22384d978a2abae967d801b6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7af9a78f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7af9a78f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7af9a78f

Branch: refs/heads/branch-2
Commit: 7af9a78fe8c63b59dba2fe57b1468f9c1d7673e8
Parents: 00e2a0a
Author: cnauroth cnaur...@apache.org
Authored: Tue May 19 11:50:27 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue May 19 11:50:38 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af9a78f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b201d9a..a3b3d4a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -257,6 +257,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11581. Multithreaded correctness Warnings
 #org.apache.hadoop.fs.shell.Ls (Brahma Reddy Battula via aw)
 
+HADOOP-11963. Metrics documentation for FSNamesystem misspells
+PendingDataNodeMessageCount. (Anu Engineer via cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7af9a78f/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 01a8d5b..14631f5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -224,7 +224,7 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `PendingDeletionBlocks` | Current number of blocks pending deletion |
 | `ExcessBlocks` | Current number of excess blocks |
 | `PostponedMisreplicatedBlocks` | (HA-only) Current number of blocks 
postponed to replicate |
-| `PendingDataNodeMessageCourt` | (HA-only) Current number of pending 
block-related messages for later processing in the standby NameNode |
+| `PendingDataNodeMessageCount` | (HA-only) Current number of pending 
block-related messages for later processing in the standby NameNode |
 | `MillisSinceLastLoadedEdits` | (HA-only) Time in milliseconds since the last 
time standby NameNode load edit log. In active NameNode, set to 0 |
 | `BlockCapacity` | Current number of block capacity |
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |



hadoop git commit: YARN-3302. TestDockerContainerExecutor should run automatically if it can detect docker in the usual place (Ravindra Kumar Naik via raviprak)

2015-05-19 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5009ad4a7 - c97f32e7b


YARN-3302. TestDockerContainerExecutor should run automatically if it can 
detect docker in the usual place (Ravindra Kumar Naik via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c97f32e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c97f32e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c97f32e7

Branch: refs/heads/trunk
Commit: c97f32e7b9d9e1d4c80682cc01741579166174d1
Parents: 5009ad4
Author: Ravi Prakash ravip...@altiscale.com
Authored: Tue May 19 10:28:11 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Tue May 19 10:28:11 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../TestDockerContainerExecutor.java| 27 +++-
 2 files changed, 24 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97f32e7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e17e9c7..34cd051 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -417,6 +417,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2421. RM still allocates containers to an app in the FINISHING
 state (Chang Li via jlowe)
 
+YARN-3302. TestDockerContainerExecutor should run automatically if it can
+detect docker in the usual place (Ravindra Kumar Naik via raviprak)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97f32e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
index 65e381c..9386897 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
@@ -51,10 +51,11 @@ import com.google.common.base.Strings;
  * This is intended to test the DockerContainerExecutor code, but it requires
  * docker to be installed.
  * <br><ol>
- * <li>Install docker, and Compile the code with docker-service-url set to the
- * host and port where docker service is running.
+ * <li>To run the tests, set the docker-service-url to the host and port where
+ * docker service is running (If docker-service-url is not specified then the
+ * local daemon will be used).
  * <br><pre><code>
- *  mvn clean install -Ddocker-service-url=tcp://0.0.0.0:4243 -DskipTests
+ * mvn test -Ddocker-service-url=tcp://0.0.0.0:4243 -Dtest=TestDockerContainerExecutor
  * </code></pre>
  */
 public class TestDockerContainerExecutor {
@@ -98,10 +99,13 @@ public class TestDockerContainerExecutor {
 
 dockerUrl = System.getProperty("docker-service-url");
 LOG.info("dockerUrl: " + dockerUrl);
-if (Strings.isNullOrEmpty(dockerUrl)) {
+if (!Strings.isNullOrEmpty(dockerUrl)) {
+  dockerUrl = " -H " + dockerUrl;
+} else if(isDockerDaemonRunningLocally()) {
+  dockerUrl = "";
+} else {
   return;
 }
-dockerUrl = " -H " + dockerUrl;
 dockerExec = "docker " + dockerUrl;
 conf.set(
   YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
@@ -136,6 +140,17 @@ public class TestDockerContainerExecutor {
 return exec != null;
   }
 
+  private boolean isDockerDaemonRunningLocally() {
+boolean dockerDaemonRunningLocally = true;
+  try {
+shellExec("docker info");
+  } catch (Exception e) {
+LOG.info("docker daemon is not running on local machine.");
+dockerDaemonRunningLocally = false;
+  }
+  return dockerDaemonRunningLocally;
+  }
+
   /**
* Test that a docker container can be launched to run a command
* @param cId a fake ContainerID
@@ -200,7 +215,7 @@ public class TestDockerContainerExecutor {
* Test that a touch command can be launched successfully in a docker
* container
*/
-  @Test
+  @Test(timeout=100)
   public void testLaunchContainer() throws IOException {
 if (!shouldRun()) {
   LOG.warn(Docker not 
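
When no docker-service-url property is supplied, the test above now probes the local daemon with docker info and only skips itself if that probe fails. A standalone sketch of such a probe (a hypothetical helper, not the test's shellExec utility):

import java.io.IOException;

public class DockerProbe {
  public static boolean isDockerDaemonRunningLocally() {
    try {
      // "docker info" exits non-zero when the daemon is unreachable
      Process p = new ProcessBuilder("docker", "info")
          .redirectErrorStream(true)
          .start();
      // drain output so the child process cannot block on a full pipe
      while (p.getInputStream().read() != -1) {
        // discard
      }
      return p.waitFor() == 0;
    } catch (IOException e) {
      return false;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return false;
    }
  }
}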

hadoop git commit: YARN-3302. TestDockerContainerExecutor should run automatically if it can detect docker in the usual place (Ravindra Kumar Naik via raviprak)

2015-05-19 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d39039d54 - 084e45362


YARN-3302. TestDockerContainerExecutor should run automatically if it can 
detect docker in the usual place (Ravindra Kumar Naik via raviprak)

(cherry picked from commit c97f32e7b9d9e1d4c80682cc01741579166174d1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/084e4536
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/084e4536
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/084e4536

Branch: refs/heads/branch-2
Commit: 084e453629865ced18ec72721d67291623ed21d3
Parents: d39039d
Author: Ravi Prakash ravip...@altiscale.com
Authored: Tue May 19 10:28:11 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Tue May 19 10:30:24 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../TestDockerContainerExecutor.java| 27 +++-
 2 files changed, 24 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/084e4536/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index eb5c183..c97df93 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -372,6 +372,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2421. RM still allocates containers to an app in the FINISHING
 state (Chang Li via jlowe)
 
+YARN-3302. TestDockerContainerExecutor should run automatically if it can
+detect docker in the usual place (Ravindra Kumar Naik via raviprak)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/084e4536/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
index 65e381c..9386897 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDockerContainerExecutor.java
@@ -51,10 +51,11 @@ import com.google.common.base.Strings;
  * This is intended to test the DockerContainerExecutor code, but it requires
  * docker to be installed.
  * <br><ol>
- * <li>Install docker, and Compile the code with docker-service-url set to the
- * host and port where docker service is running.
+ * <li>To run the tests, set the docker-service-url to the host and port where
+ * docker service is running (If docker-service-url is not specified then the
+ * local daemon will be used).
  * <br><pre><code>
- *  mvn clean install -Ddocker-service-url=tcp://0.0.0.0:4243 -DskipTests
+ * mvn test -Ddocker-service-url=tcp://0.0.0.0:4243 -Dtest=TestDockerContainerExecutor
  * </code></pre>
  */
 public class TestDockerContainerExecutor {
@@ -98,10 +99,13 @@ public class TestDockerContainerExecutor {
 
 dockerUrl = System.getProperty("docker-service-url");
 LOG.info("dockerUrl: " + dockerUrl);
-if (Strings.isNullOrEmpty(dockerUrl)) {
+if (!Strings.isNullOrEmpty(dockerUrl)) {
+  dockerUrl = " -H " + dockerUrl;
+} else if(isDockerDaemonRunningLocally()) {
+  dockerUrl = "";
+} else {
   return;
 }
-dockerUrl = " -H " + dockerUrl;
 dockerExec = "docker " + dockerUrl;
 conf.set(
   YarnConfiguration.NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME, yarnImage);
@@ -136,6 +140,17 @@ public class TestDockerContainerExecutor {
 return exec != null;
   }
 
+  private boolean isDockerDaemonRunningLocally() {
+boolean dockerDaemonRunningLocally = true;
+  try {
+shellExec("docker info");
+  } catch (Exception e) {
+LOG.info("docker daemon is not running on local machine.");
+dockerDaemonRunningLocally = false;
+  }
+  return dockerDaemonRunningLocally;
+  }
+
   /**
* Test that a docker container can be launched to run a command
* @param cId a fake ContainerID
@@ -200,7 +215,7 @@ public class TestDockerContainerExecutor {
* Test that a touch command can be launched successfully in a docker
* container
*/
-  @Test
+  @Test(timeout=100)
   public void 

hadoop git commit: HADOOP-12000. cannot use --java-home in test-patch (aw)

2015-05-19 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 743896658 - 12d6c5ce4


HADOOP-12000. cannot use --java-home in test-patch (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d6c5ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d6c5ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d6c5ce

Branch: refs/heads/trunk
Commit: 12d6c5ce4f78bc0e9464522715920866abe1f727
Parents: 7438966
Author: Allen Wittenauer a...@apache.org
Authored: Tue May 19 15:24:23 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Tue May 19 15:24:23 2015 -0700

--
 dev-support/test-patch.sh   | 2 +-
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d6c5ce/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 57fd657..d5d6c26 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -679,7 +679,7 @@ function parse_args
   --issue-re=*)
 ISSUE_RE=${i#*=}
   ;;
-  --java-home)
+  --java-home=*)
 JAVA_HOME=${i#*=}
   ;;
   --jenkins)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d6c5ce/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4621f80..3e7cb39 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -719,6 +719,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11963. Metrics documentation for FSNamesystem misspells
 PendingDataNodeMessageCount. (Anu Engineer via cnauroth)
 
+HADOOP-12000. cannot use --java-home in test-patch (aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-12000. cannot use --java-home in test-patch (aw)

2015-05-19 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 adb90c7f5 - 511a503aa


HADOOP-12000. cannot use --java-home in test-patch (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/511a503a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/511a503a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/511a503a

Branch: refs/heads/branch-2
Commit: 511a503aabf4c50e9a52d301104bde8e2995c729
Parents: adb90c7
Author: Allen Wittenauer a...@apache.org
Authored: Tue May 19 15:24:23 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Tue May 19 15:24:53 2015 -0700

--
 dev-support/test-patch.sh   | 2 +-
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/511a503a/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 57fd657..d5d6c26 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -679,7 +679,7 @@ function parse_args
   --issue-re=*)
 ISSUE_RE=${i#*=}
   ;;
-  --java-home)
+  --java-home=*)
 JAVA_HOME=${i#*=}
   ;;
   --jenkins)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/511a503a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a3b3d4a..f2a1572 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -260,6 +260,8 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11963. Metrics documentation for FSNamesystem misspells
 PendingDataNodeMessageCount. (Anu Engineer via cnauroth)
 
+HADOOP-12000. cannot use --java-home in test-patch (aw)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted. Contributed by Varun Vasudev (cherry picked from commit 7438966586f1896ab3e8b067d47a4af28a894106)

2015-05-19 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7af9a78fe - adb90c7f5


YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted. 
Contributed by Varun Vasudev
(cherry picked from commit 7438966586f1896ab3e8b067d47a4af28a894106)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/adb90c7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/adb90c7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/adb90c7f

Branch: refs/heads/branch-2
Commit: adb90c7f52be4c443a1050b2bfcbcb5cdf8542f5
Parents: 7af9a78
Author: Jian He jia...@apache.org
Authored: Tue May 19 14:20:31 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Tue May 19 14:21:48 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../pom.xml |   5 +
 .../distributedshell/ApplicationMaster.java |  54 +++-
 .../distributedshell/TestDSAppMaster.java   | 130 +++
 4 files changed, 187 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb90c7f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c97df93..16cb27b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -375,6 +375,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3302. TestDockerContainerExecutor should run automatically if it can
 detect docker in the usual place (Ravindra Kumar Naik via raviprak)
 
+YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted.
+(Varun Vasudev via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb90c7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index 24f8bcc..6ac8bf1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -116,6 +116,11 @@
   <type>test-jar</type>
   <scope>test</scope>
 </dependency>
+<dependency>
+  <groupId>org.mockito</groupId>
+  <artifactId>mockito-all</artifactId>
+  <scope>test</scope>
+</dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/adb90c7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b62c24c..b28c0c9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -30,10 +30,12 @@ import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.Vector;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -277,6 +279,10 @@ public class ApplicationMaster {
   private final String linux_bash_command = "bash";
   private final String windows_command = "cmd /c";
 
+  @VisibleForTesting
+  protected final Set<ContainerId> launchedContainers =
+  Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>());
+
   /**
* @param args Command line args
*/
@@ -601,8 +607,12 @@ public class ApplicationMaster {
 response.getContainersFromPreviousAttempts();
 LOG.info(appAttemptID +  received 
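
The registration hunk that is cut off above reads response.getContainersFromPreviousAttempts(); the point of the fix is that a restarted DistributedShell AM must count those containers as already launched, otherwise it keeps waiting for completions it never attributes and hangs. A minimal sketch of that bookkeeping, using the real YARN record types but a hypothetical helper class:

import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class PreviousAttemptBookkeeping {
  // same structure as the launchedContainers field added above
  private final Set<ContainerId> launchedContainers =
      Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>());

  public int recoverFrom(RegisterApplicationMasterResponse response) {
    List<Container> previous = response.getContainersFromPreviousAttempts();
    for (Container c : previous) {
      // mark as launched so completion accounting covers containers
      // inherited from the earlier attempt
      launchedContainers.add(c.getId());
    }
    return previous.size();
  }
}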

[2/2] hadoop git commit: HDFS-8375. Add cellSize as an XAttr to ECZone. Contributed by Vinayakumar B.

2015-05-19 Thread zhz
HDFS-8375. Add cellSize as an XAttr to ECZone. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c36df15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c36df15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c36df15

Branch: refs/heads/HDFS-7285
Commit: 3c36df1540e3483e34acd1995a39d9960b06eff6
Parents: 3676277
Author: Zhe Zhang z...@apache.org
Authored: Tue May 19 13:58:50 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue May 19 13:58:50 2015 -0700

--
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 14 --
 .../protocol/SnapshottableDirectoryStatus.java  |  2 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  2 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  8 ++--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 +--
 .../hadoop/hdfs/DFSStripedInputStream.java  | 10 ++--
 .../hadoop/hdfs/DFSStripedOutputStream.java |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  9 ++--
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../hdfs/protocol/ErasureCodingZoneInfo.java| 14 +-
 .../hdfs/protocol/HdfsLocatedFileStatus.java|  5 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  3 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  5 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 15 --
 .../server/blockmanagement/BlockManager.java| 12 +++--
 .../blockmanagement/DatanodeDescriptor.java |  4 +-
 .../erasurecode/ErasureCodingWorker.java|  2 +-
 .../namenode/ErasureCodingZoneManager.java  | 40 +---
 .../server/namenode/FSDirStatAndListingOp.java  | 21 ++---
 .../hdfs/server/namenode/FSDirectory.java   |  4 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 49 +++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  5 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  8 ++--
 .../server/protocol/BlockECRecoveryCommand.java | 26 ++-
 .../hdfs/tools/erasurecode/ECCommand.java   | 14 +-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 49 +---
 .../src/main/proto/erasurecoding.proto  |  3 ++
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  3 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |  4 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 10 ++--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  2 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  2 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  2 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 18 +++
 .../hadoop/hdfs/TestFileStatusWithECschema.java |  2 +-
 .../java/org/apache/hadoop/hdfs/TestLease.java  |  4 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java |  2 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  2 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  6 ++-
 .../server/namenode/TestAddStripedBlocks.java   |  2 +-
 .../server/namenode/TestFSEditLogLoader.java|  4 +-
 .../hdfs/server/namenode/TestFSImage.java   |  4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |  4 +-
 .../namenode/TestQuotaWithStripedBlocks.java|  2 +-
 ...TestOfflineImageViewerWithStripedBlocks.java |  2 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  |  4 +-
 .../apache/hadoop/hdfs/web/TestJsonUtil.java|  2 +-
 48 files changed, 244 insertions(+), 174 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36df15/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index f07973a..8c902b4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -49,7 +49,8 @@ public class HdfsFileStatus {
 
   private final FileEncryptionInfo feInfo;
 
-  private final ECSchema schema;
+  private final ECSchema ecSchema;
+  private final int stripeCellSize;
   
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
   private final int childrenNum;
@@ -76,7 +77,7 @@ public class HdfsFileStatus {
   long blocksize, long modification_time, long access_time,
   FsPermission permission, String owner, String group, byte[] symlink,
   byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
-  byte storagePolicy, ECSchema schema) {
+  byte 

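The hunk above replaces HdfsFileStatus's single schema field with an ecSchema plus a stripeCellSize, so the cell size travels with the file status. As a rough, self-contained sketch of a status object carrying both values (EcFileStatusSketch and schemaName are invented names for illustration; this is not the real HdfsFileStatus API):

// Illustrative only: a stand-in for a file status carrying both EC schema and cell size.
public final class EcFileStatusSketch {
  private final String ecSchemaName;  // stands in for the real ECSchema object
  private final int stripeCellSize;   // bytes per striping cell; 0 when not erasure-coded

  public EcFileStatusSketch(String ecSchemaName, int stripeCellSize) {
    this.ecSchemaName = ecSchemaName;
    this.stripeCellSize = stripeCellSize;
  }

  public boolean isStriped() {
    return ecSchemaName != null && stripeCellSize > 0;
  }

  @Override
  public String toString() {
    return isStriped()
        ? "EC[" + ecSchemaName + ", cellSize=" + stripeCellSize + "]"
        : "replicated";
  }

  public static void main(String[] args) {
    System.out.println(new EcFileStatusSketch("RS-6-3", 64 * 1024));
    System.out.println(new EcFileStatusSketch(null, 0));
  }
}
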
[1/2] hadoop git commit: HDFS-8375. Add cellSize as an XAttr to ECZone. Contributed by Vinayakumar B.

2015-05-19 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 3676277c1 -> 3c36df154


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36df15/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index 86fcb88..9c585a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -66,7 +66,7 @@ public class TestQuotaWithStripedBlocks {
 dfs = cluster.getFileSystem();
 
 dfs.mkdirs(ecDir);
-dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema);
+dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema, 0);
 dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
 dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
 dfs.setStoragePolicy(ecDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36df15/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index f3ef39a..2a51f99 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -61,7 +61,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 cluster.waitActive();
-cluster.getFileSystem().getClient().createErasureCodingZone("/", null);
+cluster.getFileSystem().getClient().createErasureCodingZone("/", null, 0);
 fs = cluster.getFileSystem();
 Path eczone = new Path("/eczone");
 fs.mkdirs(eczone);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36df15/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
index 6f29d69..5a1c3fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
@@ -152,7 +152,7 @@ public class TestStripedBlockUtil {
 int done = 0;
 while (done < bgSize) {
   Preconditions.checkState(done % CELLSIZE == 0);
-  StripingCell cell = new StripingCell(SCEHMA, done / CELLSIZE);
+  StripingCell cell = new StripingCell(SCEHMA, CELLSIZE, done / CELLSIZE);
   int idxInStripe = cell.idxInStripe;
   int size = Math.min(CELLSIZE, bgSize - done);
   for (int i = 0; i < size; i++) {
@@ -247,7 +247,7 @@ public class TestStripedBlockUtil {
 continue;
   }
   AlignedStripe[] stripes = divideByteRangeIntoStripes(SCEHMA,
-  blockGroup, brStart, brStart + brSize - 1, assembled, 0);
+  CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
 
   for (AlignedStripe stripe : stripes) {
 for (int i = 0; i < DATA_BLK_NUM; i++) {

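For orientation, the striping arithmetic these tests exercise reduces to two operations under the usual round-robin cell layout: the cell index is the byte offset divided by the cell size, and the data-block index within a stripe is that cell index modulo the number of data blocks. A tiny stand-alone sketch under that assumption (illustrative names, not the StripedBlockUtil API):

// Illustrative striping math under a round-robin cell layout; not StripedBlockUtil itself.
public class StripingMathSketch {
  static long cellIndex(long offset, int cellSize) {
    return offset / cellSize;
  }

  static int idxInStripe(long offset, int cellSize, int numDataBlocks) {
    return (int) (cellIndex(offset, cellSize) % numDataBlocks);
  }

  public static void main(String[] args) {
    final int cellSize = 64 * 1024;  // example cell size
    final int dataBlocks = 6;        // e.g. a 6+3 schema
    for (long off : new long[] {0L, 65536L, 6 * 65536L, 7 * 65536L}) {
      System.out.println(off + " -> cell " + cellIndex(off, cellSize)
          + ", data block " + idxInStripe(off, cellSize, dataBlocks));
    }
  }
}
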
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c36df15/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 8947c5b..303d063 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -65,7 +65,7 @@ public class TestJsonUtil {
 final 

hadoop git commit: YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted. Contributed by Varun Vasudev

2015-05-19 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk e422e76fc -> 743896658


YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted. 
Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74389665
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74389665
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74389665

Branch: refs/heads/trunk
Commit: 7438966586f1896ab3e8b067d47a4af28a894106
Parents: e422e76
Author: Jian He jia...@apache.org
Authored: Tue May 19 14:20:31 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Tue May 19 14:20:31 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../pom.xml |   5 +
 .../distributedshell/ApplicationMaster.java |  54 +++-
 .../distributedshell/TestDSAppMaster.java   | 130 +++
 4 files changed, 187 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74389665/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 34cd051..5a6fb38 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -420,6 +420,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3302. TestDockerContainerExecutor should run automatically if it can
 detect docker in the usual place (Ravindra Kumar Naik via raviprak)
 
+YARN-2821. Fixed a problem that DistributedShell AM may hang if restarted.
+(Varun Vasudev via jianhe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74389665/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index 5b4440f..09a56ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -116,6 +116,11 @@
   <type>test-jar</type>
   <scope>test</scope>
 </dependency>
+<dependency>
+  <groupId>org.mockito</groupId>
+  <artifactId>mockito-all</artifactId>
+  <scope>test</scope>
+</dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74389665/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b62c24c..b28c0c9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -30,10 +30,12 @@ import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.Vector;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -277,6 +279,10 @@ public class ApplicationMaster {
   private final String linux_bash_command = "bash";
   private final String windows_command = "cmd /c";
 
+  @VisibleForTesting
+  protected final Set<ContainerId> launchedContainers =
+  Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>());
+
   /**
* @param args Command line args
*/
@@ -601,8 +607,12 @@ public class ApplicationMaster {
 response.getContainersFromPreviousAttempts();
 LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
   + " previous attempts' running 

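The patch above gives the AM a thread-safe record of every container it has launched, seeded from the containers reported for previous attempts, so completion events can be matched against containers the AM actually knows about after a restart. A self-contained sketch of that bookkeeping pattern (plain String IDs and the AmRestartBookkeeping name are stand-ins, not YARN classes):

import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative bookkeeping pattern; container IDs are plain Strings here.
public class AmRestartBookkeeping {
  // Same construction as in the patch: a concurrent Set backed by a ConcurrentHashMap.
  private final Set<String> launchedContainers =
      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

  // Called once on (re)registration with the containers from previous attempts.
  public void recordPreviousAttemptContainers(List<String> previousContainers) {
    launchedContainers.addAll(previousContainers);
  }

  // Called whenever this attempt launches a new container.
  public void recordLaunch(String containerId) {
    launchedContainers.add(containerId);
  }

  // Completion events for unknown containers can be told apart from our own.
  public boolean isKnown(String containerId) {
    return launchedContainers.contains(containerId);
  }

  public static void main(String[] args) {
    AmRestartBookkeeping b = new AmRestartBookkeeping();
    b.recordPreviousAttemptContainers(java.util.Arrays.asList("container_1", "container_2"));
    b.recordLaunch("container_3");
    System.out.println(b.isKnown("container_2") + " " + b.isKnown("container_9"));
  }
}
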
hadoop git commit: HDFS-8404. Pending block replication can get stuck using older genstamp. Contributed by Nathan Roberts. (cherry picked from commit 8860e352c394372e4eb3ebdf82ea899567f34e4e)

2015-05-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 f0399f56e -> 7ac66f871


HDFS-8404. Pending block replication can get stuck using older genstamp. 
Contributed by Nathan Roberts.
(cherry picked from commit 8860e352c394372e4eb3ebdf82ea899567f34e4e)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ac66f87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ac66f87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ac66f87

Branch: refs/heads/branch-2.7
Commit: 7ac66f8712b51c22f6965f1273c1a1179420b8fc
Parents: f0399f5
Author: Kihwal Lee kih...@apache.org
Authored: Tue May 19 13:17:45 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Tue May 19 13:17:45 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockManager.java| 17 ++--
 .../blockmanagement/TestPendingReplication.java | 98 +++-
 3 files changed, 108 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ac66f87/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ddab0e5..654c1b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -84,6 +84,9 @@ Release 2.7.1 - UNRELEASED
 
 HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
 
+HDFS-8404. Pending block replication can get stuck using older genstamp
+(Nathan Roberts via kihwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ac66f87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d6aa814..09e5748 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1683,13 +1683,18 @@ public class BlockManager {
   namesystem.writeLock();
   try {
 for (int i = 0; i < timedOutItems.length; i++) {
+  /*
+   * Use the blockinfo from the blocksmap to be certain we're working
+   * with the most up-to-date block information (e.g. genstamp).
+   */
+  BlockInfoContiguous bi = blocksMap.getStoredBlock(timedOutItems[i]);
+  if (bi == null) {
+continue;
+  }
   NumberReplicas num = countNodes(timedOutItems[i]);
-  if (isNeededReplication(timedOutItems[i], getReplication(timedOutItems[i]),
-     num.liveReplicas())) {
-neededReplications.add(timedOutItems[i],
-   num.liveReplicas(),
-   num.decommissionedReplicas(),
-   getReplication(timedOutItems[i]));
+  if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) {
+neededReplications.add(bi, num.liveReplicas(),
+num.decommissionedReplicas(), getReplication(bi));
   }
 }
   } finally {

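The BlockManager hunk above stops trusting the timed-out copy of a block and instead re-resolves it against the blocksMap, skipping blocks that no longer exist, so a stale generation stamp cannot be re-queued for replication. A simplified, self-contained sketch of that re-resolve-before-requeue pattern (BlockRecord and ReplicationQueueSketch are invented names, not HDFS classes):

import java.util.HashMap;
import java.util.Map;

// Illustrative only: re-resolve a timed-out item against the authoritative map before re-queueing.
public class ReplicationQueueSketch {
  static class BlockRecord {
    final long blockId;
    final long genStamp;  // generation stamp; may have advanced since the item was queued
    BlockRecord(long blockId, long genStamp) { this.blockId = blockId; this.genStamp = genStamp; }
  }

  private final Map<Long, BlockRecord> blocksMap = new HashMap<>();

  void store(BlockRecord r) { blocksMap.put(r.blockId, r); }

  // Mirrors the idea of the patch: use the stored record, not the stale timed-out copy.
  void requeueTimedOut(BlockRecord[] timedOutItems) {
    for (BlockRecord stale : timedOutItems) {
      BlockRecord stored = blocksMap.get(stale.blockId);
      if (stored == null) {
        continue;  // block no longer exists; nothing to re-replicate
      }
      // Re-queue using the up-to-date record (e.g. current genstamp).
      System.out.println("re-queue block " + stored.blockId + " genstamp=" + stored.genStamp);
    }
  }

  public static void main(String[] args) {
    ReplicationQueueSketch s = new ReplicationQueueSketch();
    s.store(new BlockRecord(1, 1002));  // genstamp advanced to 1002 after the item was queued
    BlockRecord staleCopy = new BlockRecord(1, 1001);
    s.requeueTimedOut(new BlockRecord[] { staleCopy, new BlockRecord(2, 1000) });
  }
}
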
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ac66f87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index c63badc..698a38e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import 

hadoop git commit: HADOOP-11970. Replace uses of ThreadLocalRandom with JDK7 ThreadLocalRandom (Sean Busbey via Colin P. McCabe)

2015-05-19 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk c97f32e7b -> 470c87dbc


HADOOP-11970. Replace uses of ThreadLocalRandom with JDK7 ThreadLocalRandom 
(Sean Busbey via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/470c87db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/470c87db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/470c87db

Branch: refs/heads/trunk
Commit: 470c87dbc6c24dd3b370f1ad9e7ab1f6dabd2080
Parents: c97f32e
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue May 19 10:49:17 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Tue May 19 10:50:15 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/io/retry/RetryPolicies.java   | 14 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  5 ++-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 16 +--
 .../server/blockmanagement/BlockManager.java|  8 ++--
 .../server/blockmanagement/DatanodeManager.java |  3 +-
 .../server/blockmanagement/Host2NodesMap.java   |  4 +-
 .../hdfs/server/datanode/BPServiceActor.java| 10 ++---
 .../hdfs/server/datanode/DirectoryScanner.java  |  5 ++-
 .../datanode/metrics/DataNodeMetrics.java   |  5 ++-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  3 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |  4 +-
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 22 +-
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  |  6 ++-
 .../blockmanagement/TestReplicationPolicy.java  | 46 
 .../hdfs/server/namenode/TestFileTruncate.java  | 10 ++---
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  4 +-
 .../hdfs/server/namenode/ha/TestHAAppend.java   |  6 +--
 .../hadoop/hdfs/util/TestByteArrayManager.java  | 14 +++---
 .../sharedcache/SharedCacheUploader.java| 11 +
 21 files changed, 101 insertions(+), 102 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index baf9a0f..10da9d7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -595,6 +595,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
 split calculation (gera)
 
+HADOOP-11970. Replace uses of ThreadLocalRandom with JDK7
+ThreadLocalRandom.  (Sean Busbey via Colin P. McCabe)
+
   BUG FIXES
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/470c87db/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 14ded8e..a86f443 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -28,7 +28,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -47,13 +47,6 @@ public class RetryPolicies {
   
   public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
   
-  private static ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
-@Override
-protected Random initialValue() {
-  return new Random();
-}
-  };
-  
   /**
 * <p>
* Try once, and fail by re-throwing the exception.
@@ -321,7 +314,8 @@ public class RetryPolicies {
   }
 
   //calculate sleep time and return.
-  final double ratio = RANDOM.get().nextDouble() + 0.5;//0.5 <= ratio <= 1.5
+  // ensure 0.5 <= ratio <= 1.5
+  final double ratio = ThreadLocalRandom.current().nextDouble() + 0.5;
   final long sleepTime = Math.round(p.sleepMillis * ratio);
   return new RetryAction(RetryAction.RetryDecision.RETRY, sleepTime);
 }
@@ -610,7 +604,7 @@ public class RetryPolicies {
   private static long calculateExponentialTime(long time, int retries,
   long cap) {
 long baseTime = Math.min(time * (1L << retries), 

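The RetryPolicies hunk above swaps a hand-rolled ThreadLocal<Random> for JDK 7's ThreadLocalRandom; both give each thread its own generator, but the JDK class drops the static field and the initialValue() boilerplate. A small stand-alone before/after sketch (the jitteredSleepMillis helpers are illustrative, not Hadoop code):

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

// Illustrative before/after of the substitution made throughout this patch.
public class ThreadLocalRandomSketch {
  // Old style, as removed from RetryPolicies above:
  private static final ThreadLocal<Random> OLD_RANDOM = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random();
    }
  };

  static long jitteredSleepMillisOld(long sleepMillis) {
    double ratio = OLD_RANDOM.get().nextDouble() + 0.5;  // 0.5 <= ratio < 1.5
    return Math.round(sleepMillis * ratio);
  }

  // New style, as added by the patch:
  static long jitteredSleepMillisNew(long sleepMillis) {
    double ratio = ThreadLocalRandom.current().nextDouble() + 0.5;  // 0.5 <= ratio < 1.5
    return Math.round(sleepMillis * ratio);
  }

  public static void main(String[] args) {
    System.out.println(jitteredSleepMillisOld(1000) + " " + jitteredSleepMillisNew(1000));
  }
}
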
hadoop git commit: HDFS-8404. Pending block replication can get stuck using older genstamp. Contributed by Nathan Roberts.

2015-05-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 470c87dbc -> 8860e352c


HDFS-8404. Pending block replication can get stuck using older genstamp. 
Contributed by Nathan Roberts.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8860e352
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8860e352
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8860e352

Branch: refs/heads/trunk
Commit: 8860e352c394372e4eb3ebdf82ea899567f34e4e
Parents: 470c87d
Author: Kihwal Lee kih...@apache.org
Authored: Tue May 19 13:05:15 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Tue May 19 13:05:15 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockManager.java| 17 ++--
 .../blockmanagement/TestPendingReplication.java | 98 +++-
 3 files changed, 108 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8860e352/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 76888a9..5bcaddd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -878,6 +878,9 @@ Release 2.7.1 - UNRELEASED
 
 HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
 
+HDFS-8404. Pending block replication can get stuck using older genstamp
+(Nathan Roberts via kihwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8860e352/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8012f71..54981fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1690,13 +1690,18 @@ public class BlockManager {
   namesystem.writeLock();
   try {
 for (int i = 0; i < timedOutItems.length; i++) {
+  /*
+   * Use the blockinfo from the blocksmap to be certain we're working
+   * with the most up-to-date block information (e.g. genstamp).
+   */
+  BlockInfoContiguous bi = blocksMap.getStoredBlock(timedOutItems[i]);
+  if (bi == null) {
+continue;
+  }
   NumberReplicas num = countNodes(timedOutItems[i]);
-  if (isNeededReplication(timedOutItems[i], getReplication(timedOutItems[i]),
-     num.liveReplicas())) {
-neededReplications.add(timedOutItems[i],
-   num.liveReplicas(),
-   num.decommissionedAndDecommissioning(),
-   getReplication(timedOutItems[i]));
+  if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) {
+neededReplications.add(bi, num.liveReplicas(),
+num.decommissionedAndDecommissioning(), getReplication(bi));
   }
 }
   } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8860e352/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index c63badc..259404e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * This class tests the internals of PendingReplicationBlocks.java,
@@ -52,13 +53,11 @@ public class TestPendingReplication {
   private static 

hadoop git commit: HDFS-8404. Pending block replication can get stuck using older genstamp. Contributed by Nathan Roberts. (cherry picked from commit 8860e352c394372e4eb3ebdf82ea899567f34e4e)

2015-05-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 084e45362 -> 536b9ee6d


HDFS-8404. Pending block replication can get stuck using older genstamp. 
Contributed by Nathan Roberts.
(cherry picked from commit 8860e352c394372e4eb3ebdf82ea899567f34e4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/536b9ee6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/536b9ee6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/536b9ee6

Branch: refs/heads/branch-2
Commit: 536b9ee6d6e5b8430fda23cbdcfd859c299fa8ad
Parents: 084e453
Author: Kihwal Lee kih...@apache.org
Authored: Tue May 19 13:06:48 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Tue May 19 13:06:48 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockManager.java| 17 ++--
 .../blockmanagement/TestPendingReplication.java | 98 +++-
 3 files changed, 108 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/536b9ee6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3bbfd69..5f98b72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -544,6 +544,9 @@ Release 2.7.1 - UNRELEASED
 
 HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
 
+HDFS-8404. Pending block replication can get stuck using older genstamp
+(Nathan Roberts via kihwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/536b9ee6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 11c829d..a71d97b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1690,13 +1690,18 @@ public class BlockManager {
   namesystem.writeLock();
   try {
 for (int i = 0; i < timedOutItems.length; i++) {
+  /*
+   * Use the blockinfo from the blocksmap to be certain we're working
+   * with the most up-to-date block information (e.g. genstamp).
+   */
+  BlockInfoContiguous bi = blocksMap.getStoredBlock(timedOutItems[i]);
+  if (bi == null) {
+continue;
+  }
   NumberReplicas num = countNodes(timedOutItems[i]);
-  if (isNeededReplication(timedOutItems[i], getReplication(timedOutItems[i]),
-     num.liveReplicas())) {
-neededReplications.add(timedOutItems[i],
-   num.liveReplicas(),
-   num.decommissionedAndDecommissioning(),
-   getReplication(timedOutItems[i]));
+  if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) {
+neededReplications.add(bi, num.liveReplicas(),
+num.decommissionedAndDecommissioning(), getReplication(bi));
   }
 }
   } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/536b9ee6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index c63badc..259404e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import 
org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * This class tests the internals of PendingReplicationBlocks.java,
@@ 

hadoop git commit: HADOOP-11970. Replace uses of ThreadLocalRandom with JDK7 ThreadLocalRandom (Sean Busbey via Colin P. McCabe)

2015-05-19 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 536b9ee6d -> 9534e533d


HADOOP-11970. Replace uses of ThreadLocalRandom with JDK7 ThreadLocalRandom 
(Sean Busbey via Colin P. McCabe)

(cherry picked from commit 470c87dbc6c24dd3b370f1ad9e7ab1f6dabd2080)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9534e533
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9534e533
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9534e533

Branch: refs/heads/branch-2
Commit: 9534e533dc267372e83ef0c9858881f4faa4cf68
Parents: 536b9ee
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Tue May 19 10:49:17 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Tue May 19 11:15:04 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../apache/hadoop/io/retry/RetryPolicies.java   | 14 +++---
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  5 +++-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 16 ++--
 .../server/blockmanagement/BlockManager.java|  8 +++---
 .../server/blockmanagement/DatanodeManager.java |  3 ++-
 .../server/blockmanagement/Host2NodesMap.java   |  4 +--
 .../hadoop/hdfs/server/common/JspHelper.java|  3 ++-
 .../hdfs/server/datanode/BPServiceActor.java| 10 
 .../hdfs/server/datanode/DirectoryScanner.java  |  5 ++--
 .../datanode/metrics/DataNodeMetrics.java   |  5 ++--
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  3 ++-
 .../hdfs/server/namenode/NamenodeFsck.java  |  4 +--
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java | 22 
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  |  6 +++--
 .../blockmanagement/TestReplicationPolicy.java  | 27 
 .../hdfs/server/namenode/TestFileTruncate.java  | 10 
 .../hdfs/server/namenode/ha/TestDNFencing.java  |  4 +--
 .../hdfs/server/namenode/ha/TestHAAppend.java   |  4 +--
 .../hadoop/hdfs/util/TestByteArrayManager.java  | 14 +-
 .../sharedcache/SharedCacheUploader.java| 11 ++--
 22 files changed, 92 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9534e533/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f0004e4..cd744f9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -130,6 +130,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
 split calculation (gera)
 
+HADOOP-11970. Replace uses of ThreadLocalRandom with JDK7
+ThreadLocalRandom.  (Sean Busbey via Colin P. McCabe)
+
   BUG FIXES
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9534e533/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 14ded8e..a86f443 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -28,7 +28,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -47,13 +47,6 @@ public class RetryPolicies {
   
   public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
   
-  private static ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
-@Override
-protected Random initialValue() {
-  return new Random();
-}
-  };
-  
   /**
 * <p>
* Try once, and fail by re-throwing the exception.
@@ -321,7 +314,8 @@ public class RetryPolicies {
   }
 
   //calculate sleep time and return.
-  final double ratio = RANDOM.get().nextDouble() + 0.5;//0.5 <= ratio <= 1.5
+  // ensure 0.5 <= ratio <= 1.5
+  final