git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b35f8160 -> 88209ce18


HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88209ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88209ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88209ce1

Branch: refs/heads/trunk
Commit: 88209ce181b5ecc55c0ae2bceff4893ab4817e88
Parents: 3b35f81
Author: Konstantin V Shvachko 
Authored: Sat Sep 6 12:07:52 2014 -0700
Committer: Konstantin V Shvachko 
Committed: Sat Sep 6 12:07:52 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 333bdce..4412b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8470680..6176188 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/bloc
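
Taken together, the BlockManager hunks above replace the hard-wired replication thread with an injection point: the field loses its final initializer, the default monitor is installed through the package-private setReplicationMonitor(Runnable), and processPendingReplications() is widened to package scope. A minimal standalone sketch of that pattern, with a hypothetical Manager class standing in for BlockManager (only org.apache.hadoop.util.Daemon is the real class):

import org.apache.hadoop.util.Daemon;

public class Manager {
  // was: final Daemon replicationThread = new Daemon(new ReplicationMonitor());
  private Daemon replicationThread;

  public Manager() {
    // default monitor, installed through the new hook so a ConsensusNode-style
    // subclass can swap in its own Runnable before the thread is started
    setReplicationMonitor(new Runnable() {
      @Override
      public void run() {
        System.out.println("default replication pass");
      }
    });
  }

  void setReplicationMonitor(Runnable replicationMonitor) {
    replicationThread = new Daemon(replicationMonitor);
  }

  void activate() {
    replicationThread.start();
  }

  public static void main(String[] args) throws InterruptedException {
    Manager m = new Manager();
    m.activate();
    m.replicationThread.join();
  }
}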

git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 035112f25 -> 1ea388355


HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea38835
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea38835
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea38835

Branch: refs/heads/branch-2
Commit: 1ea388355376342e38a2819291a22e83f845a1ef
Parents: 035112f
Author: Konstantin V Shvachko 
Authored: Sat Sep 6 12:04:49 2014 -0700
Committer: Konstantin V Shvachko 
Committed: Sat Sep 6 12:04:49 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea38835/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5b74293..6a44347 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -188,6 +188,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea38835/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5d23c1f..de02de1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1619,7 +1636,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea38835/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/serv

[1/5] git commit: HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to determine if in proxyuser mode or not. (tucu)

2014-09-06 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6581 339d21f27 -> 31bbeaf38


HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to 
determine if in proxyuser mode or not. (tucu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f3c19c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f3c19c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f3c19c1

Branch: refs/heads/HDFS-6581
Commit: 0f3c19c1bb9e341d8aed132ba3eb9e7fc7588306
Parents: 71c8d73
Author: Alejandro Abdelnur 
Authored: Fri Sep 5 10:04:07 2014 -0700
Committer: Alejandro Abdelnur 
Committed: Fri Sep 5 21:59:12 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java| 6 +++---
 .../java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java  | 6 +++---
 3 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3c19c1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9aef131..c77fddc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.6.0 - UNRELEASED
 HADOOP-11067. warning message 'ssl.client.truststore.location has not
 been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)
 
+HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to
+determine if in proxyuser mode or not. (tucu)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3c19c1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index a4e336c..acbe096 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -385,9 +385,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   // if current UGI is different from UGI at constructor time, behave as
   // proxyuser
   UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-  final String doAsUser =
-  (loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
-  ? null : currentUgi.getShortUserName();
+  final String doAsUser = (currentUgi.getAuthenticationMethod() ==
+  UserGroupInformation.AuthenticationMethod.PROXY)
+  ? currentUgi.getShortUserName() : null;
 
   // creating the HTTP connection using the current UGI at constructor time
   conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3c19c1/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index f381fa0..b921c84 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1157,7 +1157,7 @@ public class TestKMS {
 final URI uri = createKMSUri(getKMSUrl());
 
 // proxyuser client using kerberos credentials
-UserGroupInformation clientUgi = UserGroupInformation.
+final UserGroupInformation clientUgi = UserGroupInformation.
 loginUserFromKeytabAndReturnUGI("client", 
keytab.getAbsolutePath());
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
   @Override
@@ -1167,7 +1167,7 @@ public class TestKMS {
 
 // authorized proxyuser
 UserGroupInformation fooUgi =
-UserGroupInformation.createRemoteUser("foo");
+UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
   @Override
   public Void run() throws Exception {
@@ -1179,
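
The KMSClientProvider hunk switches the doAs decision from a short-user-name comparison to the UGI's authentication method, and the test now builds the proxy UGI with createProxyUser() so that method is actually PROXY. A standalone sketch of the new rule (resolveDoAsUser is a hypothetical helper; the UserGroupInformation calls are real):

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsCheck {
  /** Returns the user to impersonate, or null when no proxying is needed. */
  static String resolveDoAsUser() throws IOException {
    UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
    return (currentUgi.getAuthenticationMethod() ==
        UserGroupInformation.AuthenticationMethod.PROXY)
        ? currentUgi.getShortUserName() : null;
  }

  public static void main(String[] args) throws IOException {
    UserGroupInformation real = UserGroupInformation.getCurrentUser();
    UserGroupInformation proxy =
        UserGroupInformation.createProxyUser("foo", real);
    // prints PROXY: a UGI built this way is detected even when the short
    // user name happens to match the login user
    System.out.println(proxy.getAuthenticationMethod());
  }
}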

[2/5] git commit: HADOOP-11070. Create MiniKMS for testing. (tucu)

2014-09-06 Thread arp
HADOOP-11070. Create MiniKMS for testing. (tucu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71c8d735
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71c8d735
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71c8d735

Branch: refs/heads/HDFS-6581
Commit: 71c8d735f5038e3b516947f12180d7568b6979dc
Parents: e6420fe
Author: Alejandro Abdelnur 
Authored: Fri Sep 5 14:09:22 2014 -0700
Committer: Alejandro Abdelnur 
Committed: Fri Sep 5 21:59:12 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 hadoop-common-project/hadoop-kms/pom.xml|   4 +-
 .../hadoop/crypto/key/kms/server/MiniKMS.java   | 197 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   |  82 +---
 4 files changed, 211 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c8d735/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 88804cd..9aef131 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -507,6 +507,8 @@ Release 2.6.0 - UNRELEASED
 HADOOP-11060. Create a CryptoCodec test that verifies interoperability 
 between the JCE and OpenSSL implementations. (hitliuyi via tucu)
 
+HADOOP-11070. Create MiniKMS for testing. (tucu)
+
   OPTIMIZATIONS
 
 HADOOP-10838. Byte array native checksumming. (James Thomas via todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c8d735/hadoop-common-project/hadoop-kms/pom.xml
--
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index 3bb97c5..629ffda 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -222,9 +222,9 @@
 
 
   
-
+
 
-
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c8d735/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
new file mode 100644
index 000..5a6d4c5
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.security.SslSocketConnector;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.Writer;
+import java.net.InetAddress;
+import java.net.MalformedURLException;
+import java.net.ServerSocket;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+public class MiniKMS {
+
+  private static Server createJettyServer(String keyStore, String password) {
+try {
+  boolean ssl = keyStore != null;
+  InetAddress localhost = InetAddress.getByName("localhost");
+  String host = "localhost";
+  ServerSocket ss = new ServerSocket(0, 50, localhost);
+  int port = ss.getLocalPort();
+  ss.close();
+  Server server = new Server(0);
+  if (!ssl) {
+server.getConnectors()[0].setHost(host);
+server.getConnectors()[0].setPort(port);
+  } else {
+SslSocketConnector c = new SslSocketConnector(

[4/5] git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread arp
HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88209ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88209ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88209ce1

Branch: refs/heads/HDFS-6581
Commit: 88209ce181b5ecc55c0ae2bceff4893ab4817e88
Parents: 3b35f81
Author: Konstantin V Shvachko 
Authored: Sat Sep 6 12:07:52 2014 -0700
Committer: Konstantin V Shvachko 
Committed: Sat Sep 6 12:07:52 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 333bdce..4412b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8470680..6176188 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 709f060..55d616f 100644
--- 
a/hadoop-

[5/5] git commit: Merge remote-tracking branch 'apache-commit/trunk' into HDFS-6581

2014-09-06 Thread arp
Merge remote-tracking branch 'apache-commit/trunk' into HDFS-6581


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31bbeaf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31bbeaf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31bbeaf3

Branch: refs/heads/HDFS-6581
Commit: 31bbeaf383a728a0453f5dbe93eed0ab18b5b992
Parents: 339d21f 88209ce
Author: arp 
Authored: Sat Sep 6 12:28:22 2014 -0700
Committer: arp 
Committed: Sat Sep 6 12:28:22 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   5 +
 .../crypto/key/kms/KMSClientProvider.java   |   6 +-
 hadoop-common-project/hadoop-kms/pom.xml|   4 +-
 .../hadoop/crypto/key/kms/server/MiniKMS.java   | 197 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   |  88 ++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   5 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +
 .../hadoop/hdfs/DistributedFileSystem.java  |  24 +++
 .../server/blockmanagement/BlockManager.java|  23 ++-
 .../server/blockmanagement/DatanodeManager.java |   6 +-
 .../server/blockmanagement/HostFileManager.java |   4 +
 .../hdfs/server/namenode/FSNamesystem.java  |  46 +++--
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  43 
 .../hdfs/server/namenode/NameNodeAdapter.java   |   2 +-
 14 files changed, 351 insertions(+), 106 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31bbeaf3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--



[3/5] git commit: HDFS-6986. DistributedFileSystem must get delegation tokens from configured KeyProvider. (zhz via tucu)

2014-09-06 Thread arp
HDFS-6986. DistributedFileSystem must get delegation tokens from configured 
KeyProvider. (zhz via tucu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b35f816
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b35f816
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b35f816

Branch: refs/heads/HDFS-6581
Commit: 3b35f81603bbfae119762b50bcb46de70a421368
Parents: 0f3c19c
Author: Alejandro Abdelnur 
Authored: Fri Sep 5 22:33:48 2014 -0700
Committer: Alejandro Abdelnur 
Committed: Fri Sep 5 22:33:48 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++
 .../hadoop/hdfs/DistributedFileSystem.java  | 24 +++
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 43 
 4 files changed, 74 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b35f816/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0772ea6..333bdce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -711,6 +711,9 @@ Release 2.6.0 - UNRELEASED
   HDFS-6714. TestBlocksScheduledCounter#testBlocksScheduledCounter should
   shutdown cluster (vinayakumarb)
 
+  HDFS-6986. DistributedFileSystem must get delegation tokens from 
configured 
+  KeyProvider. (zhz via tucu)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b35f816/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8daf912..e4215f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3084,4 +3084,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   DFSHedgedReadMetrics getHedgedReadMetrics() {
 return HEDGED_READ_METRIC;
   }
+
+  public KeyProviderCryptoExtension getKeyProvider() {
+return provider;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b35f816/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index bf7d62e..dbdf5c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -84,8 +84,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -1946,6 +1948,28 @@ public class DistributedFileSystem extends FileSystem {
 }.resolve(this, absF);
   }
 
+  @Override
+  public Token<?>[] addDelegationTokens(
+      final String renewer, Credentials credentials) throws IOException {
+    Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
+    if (dfs.getKeyProvider() != null) {
+      KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
+          KeyProviderDelegationTokenExtension.
+              createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
+      Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
+          addDelegationTokens(renewer, credentials);
+      if (tokens != null && kpTokens != null) {
+        Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
+        System.arraycopy(tokens, 0, all, 0, tokens.length);
+        System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
+        tokens = all;
+      } else {
+        tokens = (tokens != null) ? tokens : kpTokens;
+      }
+    }
+    return tokens;
+  }
+
   public DFSInotifyEventInputStream getInotifyEve
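
With this override in place, a single addDelegationTokens() call on a DistributedFileSystem returns the HDFS token plus the KMS delegation token whenever a KeyProvider is configured. A hedged usage sketch; the NameNode URI and renewer name are placeholders, and the code assumes a reachable, kerberized cluster:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class FetchTokens {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs =
        FileSystem.get(URI.create("hdfs://nn.example.com:8020"), conf);
    Credentials creds = new Credentials();
    // one call now gathers both the HDFS and (if configured) KMS tokens
    Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);
    for (Token<?> t : tokens) {
      System.out.println(t.getKind() + " for " + t.getService());
    }
  }
}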

git commit: HDFS-6997: add more tests for data migration and replication.

2014-09-06 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6584 ba4fc9317 -> 22a41dce4


HDFS-6997: add more tests for data migration and replication.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22a41dce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22a41dce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22a41dce

Branch: refs/heads/HDFS-6584
Commit: 22a41dce4af4d5b533ba875b322551db1c152878
Parents: ba4fc93
Author: Tsz-Wo Nicholas Sze 
Authored: Sun Sep 7 07:44:28 2014 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Sun Sep 7 07:44:28 2014 +0800

--
 .../server/blockmanagement/BlockManager.java|   2 +-
 .../BlockPlacementPolicyDefault.java| 131 +++--
 .../blockmanagement/DatanodeDescriptor.java |  52 +-
 .../blockmanagement/DatanodeStorageInfo.java|   2 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   7 +
 .../apache/hadoop/hdfs/util/EnumCounters.java   |   9 +
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  12 +-
 .../hdfs/server/mover/TestStorageMover.java | 477 +--
 8 files changed, 579 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22a41dce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index af83653..956900d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2930,7 +2930,7 @@ public class BlockManager {
 // Decrement number of blocks scheduled to this datanode.
 // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
 // RECEIVED_BLOCK), we currently also decrease the approximate number. 
-node.decrementBlocksScheduled();
+node.decrementBlocksScheduled(storageInfo.getStorageType());
 
 // get the deletion hint node
 DatanodeDescriptor delHintNode = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22a41dce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 593ea90..a0e6701 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -76,12 +76,6 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
*/
   protected int tolerateHeartbeatMultiplier;
 
-  protected BlockPlacementPolicyDefault(Configuration conf, FSClusterStats 
stats,
-   NetworkTopology clusterMap, 
-   Host2NodesMap host2datanodeMap) {
-initialize(conf, stats, clusterMap, host2datanodeMap);
-  }
-
   protected BlockPlacementPolicyDefault() {
   }
 
@@ -174,6 +168,10 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   return getPipeline(writer,
   results.toArray(new DatanodeStorageInfo[results.size()]));
 } catch (NotEnoughReplicasException nr) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Failed to choose with favored nodes (=" + favoredNodes
++ "), disregard favored nodes hint and retry.", nr);
+  }
   // Fall back to regular block placement disregarding favored nodes hint
   return chooseTarget(src, numOfReplicas, writer, 
new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, 
@@ -291,6 +289,9 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 unavailableStorages, newBlock);
final EnumMap<StorageType, Integer> storageTypes =
 getRequiredStorageTypes(requiredStorageTypes);
+if (LOG.isTraceEnabled()) {
+  LOG.trace("storageTypes=" + storageTypes);
+}
 
 try {
   if ((numOfReplicas = requiredStorageTypes.size()) == 0) {
@@ -337,7 +338,11 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 } catch (NotEnoughReplicasException e) {
   final String message = "
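
The BlockPlacementPolicyDefault hunks add debug logging around an existing fallback: placement is first attempted with the favored-nodes hint, and on NotEnoughReplicasException the policy retries without the hint. The shape of that fallback, with stand-in names (none of these are the HDFS classes):

import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class PlacementWithHint {
  private static final Log LOG = LogFactory.getLog(PlacementWithHint.class);

  static class NotEnoughReplicasException extends Exception {
    NotEnoughReplicasException(String msg) { super(msg); }
  }

  String choose(List<String> favoredNodes) {
    try {
      return chooseWithHint(favoredNodes);
    } catch (NotEnoughReplicasException nr) {
      // log the cause at debug before silently falling back, which is what
      // the patch adds to the real policy
      if (LOG.isDebugEnabled()) {
        LOG.debug("Failed to choose with favored nodes (=" + favoredNodes
            + "), disregard favored nodes hint and retry.", nr);
      }
      return chooseWithoutHint();
    }
  }

  String chooseWithHint(List<String> favored)
      throws NotEnoughReplicasException {
    throw new NotEnoughReplicasException("favored nodes exhausted");
  }

  String chooseWithoutHint() {
    return "any-node";
  }
}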

git commit: YARN-2519. Credential Provider related unit tests failed on Windows. Contributed by Xiaoyu Yao.

2014-09-06 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/trunk 88209ce18 -> cbea1b10e


YARN-2519. Credential Provider related unit tests failed on Windows. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbea1b10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbea1b10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbea1b10

Branch: refs/heads/trunk
Commit: cbea1b10efd871d04c648af18449dc724685db74
Parents: 88209ce
Author: cnauroth 
Authored: Sat Sep 6 20:05:07 2014 -0700
Committer: cnauroth 
Committed: Sat Sep 6 20:05:07 2014 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbea1b10/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 34a206a..beafc22 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -287,6 +287,9 @@ Release 2.6.0 - UNRELEASED
 YARN-2431. NM restart: cgroup is not removed for reacquired containers
 (jlowe)
 
+YARN-2519. Credential Provider related unit tests failed on Windows.
+(Xiaoyu Yao via cnauroth)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbea1b10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
index 18600fd..2bd91b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.File;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServer2.Builder;
 import org.apache.hadoop.security.alias.CredentialProvider;
@@ -74,8 +75,9 @@ public class TestWebAppUtils {
 "target/test-dir"));
 
 Configuration conf = new Configuration();
+final Path jksPath = new Path(testDir.toString(), "test.jks");
 final String ourUrl =
-JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
+JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
 
 File file = new File(testDir, "test.jks");
 file.delete();



git commit: YARN-2519. Credential Provider related unit tests failed on Windows. Contributed by Xiaoyu Yao.

2014-09-06 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1ea388355 -> af1d38915


YARN-2519. Credential Provider related unit tests failed on Windows. 
Contributed by Xiaoyu Yao.

(cherry picked from commit cbea1b10efd871d04c648af18449dc724685db74)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af1d3891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af1d3891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af1d3891

Branch: refs/heads/branch-2
Commit: af1d3891570f23ef391c4b7aed3f6dedbcf9277b
Parents: 1ea3883
Author: cnauroth 
Authored: Sat Sep 6 20:05:07 2014 -0700
Committer: cnauroth 
Committed: Sat Sep 6 20:06:44 2014 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af1d3891/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cf9fe6e..83274b6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -264,6 +264,9 @@ Release 2.6.0 - UNRELEASED
 YARN-2431. NM restart: cgroup is not removed for reacquired containers
 (jlowe)
 
+YARN-2519. Credential Provider related unit tests failed on Windows.
+(Xiaoyu Yao via cnauroth)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af1d3891/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
index 18600fd..2bd91b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.File;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServer2.Builder;
 import org.apache.hadoop.security.alias.CredentialProvider;
@@ -74,8 +75,9 @@ public class TestWebAppUtils {
 "target/test-dir"));
 
 Configuration conf = new Configuration();
+final Path jksPath = new Path(testDir.toString(), "test.jks");
 final String ourUrl =
-JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
+JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
 
 File file = new File(testDir, "test.jks");
 file.delete();



[2/2] git commit: HDFS-6898. DN must reserve space for a full block when an RBW block is created. (Contributed by Arpit Agarwal)

2014-09-06 Thread arp
HDFS-6898. DN must reserve space for a full block when an RBW block is created. 
(Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1fa5829
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1fa5829
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1fa5829

Branch: refs/heads/trunk
Commit: d1fa58292e87bc29b4ef1278368c2be938a0afc4
Parents: cbea1b1
Author: arp 
Authored: Sat Sep 6 20:02:40 2014 -0700
Committer: arp 
Committed: Sat Sep 6 21:04:29 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../server/datanode/ReplicaBeingWritten.java|  12 +-
 .../hdfs/server/datanode/ReplicaInPipeline.java |  33 ++-
 .../hdfs/server/datanode/ReplicaInfo.java   |   7 +
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  11 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   6 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  15 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  58 +++-
 .../server/datanode/TestDirectoryScanner.java   |   8 +
 .../fsdataset/impl/TestRbwSpaceReservation.java | 288 +++
 .../fsdataset/impl/TestWriteToReplica.java  |   2 +-
 12 files changed, 423 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1fa5829/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4412b30..3d43171 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -612,6 +612,9 @@ Release 2.6.0 - UNRELEASED
 HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
 Arpit Agarwal)
 
+HDFS-6898. DN must reserve space for a full block when an RBW block is
+created. (Arpit Agarwal)
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1fa5829/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 77fe543..240dcd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -48,7 +48,7 @@ public class HdfsConstants {
   "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
   
   
-  public static final int MIN_BLOCKS_FOR_WRITE = 5;
+  public static final int MIN_BLOCKS_FOR_WRITE = 1;
 
   // Long that indicates "leave current quota unchanged"
   public static final long QUOTA_DONT_SET = Long.MAX_VALUE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1fa5829/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
index 728dd38..4a89493 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
@@ -34,10 +34,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
+   * @param bytesToReserve disk space to reserve for this replica, based on
+   *   the estimated maximum block length.
*/
   public ReplicaBeingWritten(long blockId, long genStamp, 
-FsVolumeSpi vol, File dir) {
-super( blockId, genStamp, vol, dir);
+FsVolumeSpi vol, File dir, long bytesToReserve) {
+super(blockId, genStamp, vol, dir, bytesToReserve);
   }
   
   /**
@@ -60,10 +62,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
+   * @pa

[1/2] git commit: HDFS-6898. DN must reserve space for a full block when an RBW block is created. (Contributed by Arpit Agarwal)

2014-09-06 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 af1d38915 -> eba06e61c
  refs/heads/trunk cbea1b10e -> d1fa58292


HDFS-6898. DN must reserve space for a full block when an RBW block is created. 
(Contributed by Arpit Agarwal)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eba06e61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eba06e61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eba06e61

Branch: refs/heads/branch-2
Commit: eba06e61cea1d0ae19826f68ee6b8ca6ba8aadcc
Parents: af1d389
Author: arp 
Authored: Sat Sep 6 20:02:40 2014 -0700
Committer: arp 
Committed: Sat Sep 6 21:04:21 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../server/datanode/ReplicaBeingWritten.java|  12 +-
 .../hdfs/server/datanode/ReplicaInPipeline.java |  33 ++-
 .../hdfs/server/datanode/ReplicaInfo.java   |   7 +
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  11 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  15 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  56 +++-
 .../server/datanode/TestDirectoryScanner.java   |   8 +
 .../fsdataset/impl/TestRbwSpaceReservation.java | 288 +++
 .../fsdataset/impl/TestWriteToReplica.java  |   2 +-
 12 files changed, 421 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eba06e61/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6a44347..0393164 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.6.0 - UNRELEASED
 HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
 Arpit Agarwal)
 
+HDFS-6898. DN must reserve space for a full block when an RBW block is
+created. (Arpit Agarwal)
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eba06e61/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 24b9fc1..2588167 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -50,7 +50,7 @@ public class HdfsConstants {
   "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
   
   
-  public static final int MIN_BLOCKS_FOR_WRITE = 5;
+  public static final int MIN_BLOCKS_FOR_WRITE = 1;
 
   // Long that indicates "leave current quota unchanged"
   public static final long QUOTA_DONT_SET = Long.MAX_VALUE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eba06e61/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
index 728dd38..4a89493 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
@@ -34,10 +34,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
+   * @param bytesToReserve disk space to reserve for this replica, based on
+   *   the estimated maximum block length.
*/
   public ReplicaBeingWritten(long blockId, long genStamp, 
-FsVolumeSpi vol, File dir) {
-super( blockId, genStamp, vol, dir);
+FsVolumeSpi vol, File dir, long bytesToReserve) {
+super(blockId, genStamp, vol, dir, bytesToReserve);
   }
   
   /**
@

git commit: MAPREDUCE-6077. native-task: Remove CustomModule examples in nativetask (seanzhong)

2014-09-06 Thread seanzhong
Repository: hadoop
Updated Branches:
  refs/heads/MR-2841 7c91f9b14 -> 52a8b4db9


MAPREDUCE-6077. native-task: Remove CustomModule examples in nativetask 
(seanzhong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52a8b4db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52a8b4db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52a8b4db

Branch: refs/heads/MR-2841
Commit: 52a8b4db92a35598006103c59a2ff93afc5312ee
Parents: 7c91f9b
Author: Sean Zhong 
Authored: Sat Sep 6 11:46:07 2014 +0800
Committer: Sean Zhong 
Committed: Sat Sep 6 11:46:07 2014 +0800

--
 .../CHANGES.MAPREDUCE-2841.txt  |   1 +
 .../sdk/example/CustomModule/README.txt |   1 -
 .../sdk/example/CustomModule/pom.xml| 131 ---
 .../platform/custom/CustomPlatform.java |  39 --
 .../nativetask/serde/custom/CustomWritable.java |  75 ---
 .../serde/custom/CustomWritableSerializer.java  |  33 -
 .../src/main/native/src/CustomComparator.cpp|  88 -
 7 files changed, 1 insertion(+), 367 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52a8b4db/hadoop-mapreduce-project/CHANGES.MAPREDUCE-2841.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.MAPREDUCE-2841.txt 
b/hadoop-mapreduce-project/CHANGES.MAPREDUCE-2841.txt
index 539e7be..cfc9412 100644
--- a/hadoop-mapreduce-project/CHANGES.MAPREDUCE-2841.txt
+++ b/hadoop-mapreduce-project/CHANGES.MAPREDUCE-2841.txt
@@ -23,3 +23,4 @@ MAPREDUCE-6055. native-task: findbugs, interface annotations, 
and other misc cle
 MAPREDUCE-6067. native-task: fix some counter issues (Binglin Chang)
 MAPREDUCE-6069. native-task: Lint/style fixes and removal of unused code (todd)
 MAPREDUCE-6074. native-task: fix release audit, javadoc, javac warnings (todd)
+MAPREDUCE-6077. native-task: Remove CustomModule examples in nativetask 
(seanzhong)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52a8b4db/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/README.txt
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/README.txt
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/README.txt
deleted file mode 100644
index 0ad6f1e..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/README.txt
+++ /dev/null
@@ -1 +0,0 @@
-This project is depend on hadoop and hadoop-nativetask, so in order to 
complete the build, you have to specify the hadoop dir first.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52a8b4db/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/pom.xml
deleted file mode 100644
index 5bf67c7..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/sdk/example/CustomModule/pom.xml
+++ /dev/null
@@ -1,131 +0,0 @@
-
-
-http://maven.apache.org/POM/4.0.0"; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-  http://maven.apache.org/xsd/maven-4.0.0.xsd";>
-  4.0.0
-  org.apache.hadoop
-  nativetask-sdk
-  2.2.0
-  nativetask-sdk
-
-  
-  
-
-  
-
-  org.apache.hadoop
-  hadoop-mapreduce-client-nativetask
-  2.2.0
-
-  
-
-  
-
-  
-org.apache.maven.plugins
-maven-jar-plugin
-  
-  
-org.apache.maven.plugins
-maven-surefire-plugin
-
-
-  
-
-  
-
-  
-
-  native
-  
-false
-  
-  
-
-  
-org.apache.maven.plugins
-maven-enforcer-plugin
-
-  
-enforce-os
-
-  enforce
-
-
-  
-
-  mac
-  unix
-  native build only supported on Mac or
-Unix
-
-  
-  true
-
-  
-
-  
-  
-org.apache

[03/16] git commit: HDFS-6979. Fix minor error in CHANGES.txt. Contributed by Chris Nauroth.

2014-09-06 Thread seanzhong
HDFS-6979. Fix minor error in CHANGES.txt. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b051327a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b051327a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b051327a

Branch: refs/heads/MR-2841
Commit: b051327ab6a01774e1dad59e1e547dd16f603789
Parents: fab9bc5
Author: cnauroth 
Authored: Fri Sep 5 11:07:41 2014 -0700
Committer: cnauroth 
Committed: Fri Sep 5 11:07:41 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b051327a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 680af55..7b8917b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -605,7 +605,7 @@ Release 2.6.0 - UNRELEASED
 HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'.
 (Xiaoyu Yao via Arpit Agarwal)
 
-HDFS-6979. hdfs.dll not produce .pdb files. (cnauroth)
+HDFS-6979. hdfs.dll does not produce .pdb files. (cnauroth)
 
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   



[02/16] git commit: HDFS-6979. hdfs.dll not produce .pdb files. Contributed by Chris Nauroth.

2014-09-06 Thread seanzhong
HDFS-6979. hdfs.dll not produce .pdb files. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fab9bc58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fab9bc58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fab9bc58

Branch: refs/heads/MR-2841
Commit: fab9bc58ec03ea81cd5ce8a8746a4ee588f7bb08
Parents: 9e941d9
Author: cnauroth 
Authored: Fri Sep 5 11:03:58 2014 -0700
Committer: cnauroth 
Committed: Fri Sep 5 11:03:58 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fab9bc58/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a087d2..680af55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -605,6 +605,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'.
 (Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-6979. hdfs.dll not produce .pdb files. (cnauroth)
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fab9bc58/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 2c4ddf6..ecdd1ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -415,11 +415,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
 
-  
+  
 
 
 
-  
+  
 
   
 
@@ -437,7 +437,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   
   
 
-
+
   
   
   



[09/16] git commit: YARN-2508. Cross Origin configuration parameters prefix are not honored (Mit Desai via jeagles)

2014-09-06 Thread seanzhong
YARN-2508. Cross Origin configuration parameters prefix are not honored (Mit 
Desai via jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6420fec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6420fec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6420fec

Branch: refs/heads/MR-2841
Commit: e6420fec0af9b8d4f424098688ae4926ff527fcf
Parents: 21c0cde
Author: Jonathan Eagles 
Authored: Fri Sep 5 19:42:40 2014 -0500
Committer: Jonathan Eagles 
Committed: Fri Sep 5 19:42:40 2014 -0500

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../timeline/webapp/CrossOriginFilterInitializer.java   | 12 +++-
 .../webapp/TestCrossOriginFilterInitializer.java|  7 ++-
 3 files changed, 16 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6420fec/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1a5ea07..34a206a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -181,6 +181,9 @@ Release 2.6.0 - UNRELEASED
 YARN-2511. Allowed all origins by default when CrossOriginFilter is
 enabled. (Jonathan Eagles via zjshen)
 
+YARN-2508. Cross Origin configuration parameters prefix are not honored
+(Mit Desai via jeagles)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6420fec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java
index 69e0188..148cc63 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.timeline.webapp;
 
+import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
@@ -37,6 +38,15 @@ public class CrossOriginFilterInitializer extends 
FilterInitializer {
   }
 
   static Map<String, String> getFilterParameters(Configuration conf) {
-    return conf.getValByRegex(PREFIX);
+    Map<String, String> filterParams =
+        new HashMap<String, String>();
+    for (Map.Entry<String, String> entry : conf.getValByRegex(PREFIX)
+        .entrySet()) {
+      String name = entry.getKey();
+      String value = entry.getValue();
+      name = name.substring(PREFIX.length());
+      filterParams.put(name, value);
+    }
+    return filterParams;
   }
 }
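
For context, a minimal sketch (not part of the patch) of why the prefix must be stripped: Configuration.getValByRegex(PREFIX) returns entries keyed by their full configuration names, while CrossOriginFilter looks its parameters up by their short names. The prefix value below is assumed to match CrossOriginFilterInitializer.PREFIX.

    import java.util.HashMap;
    import java.util.Map;

    public class PrefixStripDemo {
      // Assumed to match CrossOriginFilterInitializer.PREFIX:
      static final String PREFIX = "yarn.timeline-service.http-cross-origin.";

      public static void main(String[] args) {
        // What conf.getValByRegex(PREFIX) hands back: fully qualified keys.
        Map<String, String> raw = new HashMap<String, String>();
        raw.put(PREFIX + "allowed-origins", "*");

        // The fix: strip the prefix so CrossOriginFilter sees the short
        // names ("allowed-origins") it actually honors.
        Map<String, String> filterParams = new HashMap<String, String>();
        for (Map.Entry<String, String> e : raw.entrySet()) {
          filterParams.put(e.getKey().substring(PREFIX.length()), e.getValue());
        }
        System.out.println(filterParams); // {allowed-origins=*}
      }
    }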

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6420fec/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java
index 3199aac..cf26368 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java
@@ -42,11 +42,8 @@ public class TestCrossOriginFilterInitializer {
 CrossOriginFilterInitializer.getFilterParameters(conf);
 
 // retrieve values
-String rootvalue =
-filterParameters.get(CrossOriginFilterInitializer.PREFIX + 
"rootparam");
-String nestedvalue =
-filterParameters.get(CrossOriginFilterIniti

[13/16] git commit: HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko.

2014-09-06 Thread seanzhong
HDFS-6940. Refactoring to allow ConsensusNode implementation.
Contributed by Konstantin Shvachko.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88209ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88209ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88209ce1

Branch: refs/heads/MR-2841
Commit: 88209ce181b5ecc55c0ae2bceff4893ab4817e88
Parents: 3b35f81
Author: Konstantin V Shvachko 
Authored: Sat Sep 6 12:07:52 2014 -0700
Committer: Konstantin V Shvachko 
Committed: Sat Sep 6 12:07:52 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../server/blockmanagement/BlockManager.java| 23 --
 .../server/blockmanagement/DatanodeManager.java |  6 ++-
 .../server/blockmanagement/HostFileManager.java |  4 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 46 +++-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 57 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 333bdce..4412b30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
 (Dave Marion and Haohui Mai via jing9)
 
+HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
+
   OPTIMIZATIONS
 
 HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8470680..6176188 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  Daemon replicationThread;
   
   /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,6 +263,7 @@ public class BlockManager {
 this.namesystem = namesystem;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
+setReplicationMonitor(new ReplicationMonitor());
 
 final long pendingPeriod = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -394,7 +395,23 @@ public class BlockManager {
   lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
 }
   }
-  
+
+  public long getReplicationRecheckInterval() {
+return replicationRecheckInterval;
+  }
+
+  public AtomicLong excessBlocksCount() {
+return excessBlocksCount;
+  }
+
+  public void clearInvalidateBlocks() {
+invalidateBlocks.clear();
+  }
+
+  void setReplicationMonitor(Runnable replicationMonitor) {
+replicationThread = new Daemon(replicationMonitor);
+  }
+
   public void setBlockPoolId(String blockPoolId) {
 if (isBlockTokenEnabled()) {
   blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
-  private void processPendingReplications() {
+  void processPendingReplications() {
 Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88209ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 709f060..55d616f 100644
--- 
a/hadoop-hd
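
For context, a hedged sketch (hypothetical code, not part of the patch) of what the new setReplicationMonitor(Runnable) hook enables: a NameNode variant such as a ConsensusNode can hand BlockManager its own monitor in place of the stock ReplicationMonitor, reusing the helpers this refactoring exposes.

    // Hypothetical replacement monitor; the BlockManager methods referenced
    // in comments are the ones made accessible by this refactoring.
    class ConsensusReplicationMonitor implements Runnable {
      private final long recheckIntervalMs; // from getReplicationRecheckInterval()

      ConsensusReplicationMonitor(long recheckIntervalMs) {
        this.recheckIntervalMs = recheckIntervalMs;
      }

      @Override
      public void run() {
        while (!Thread.currentThread().isInterrupted()) {
          // coordinate with the consensus layer here, then reuse the stock
          // bookkeeping, e.g. the now package-visible
          // processPendingReplications()
          try {
            Thread.sleep(recheckIntervalMs);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
          }
        }
      }
    }

A subclass in the same package could then call setReplicationMonitor(new ConsensusReplicationMonitor(...)), and BlockManager wraps it in a Daemon exactly as the constructor hunk above shows.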

[15/16] git commit: HDFS-6898. DN must reserve space for a full block when an RBW block is created. (Contributed by Arpit Agarwal)

2014-09-06 Thread seanzhong
HDFS-6898. DN must reserve space for a full block when an RBW block is created. 
(Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1fa5829
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1fa5829
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1fa5829

Branch: refs/heads/MR-2841
Commit: d1fa58292e87bc29b4ef1278368c2be938a0afc4
Parents: cbea1b1
Author: arp 
Authored: Sat Sep 6 20:02:40 2014 -0700
Committer: arp 
Committed: Sat Sep 6 21:04:29 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../server/datanode/ReplicaBeingWritten.java|  12 +-
 .../hdfs/server/datanode/ReplicaInPipeline.java |  33 ++-
 .../hdfs/server/datanode/ReplicaInfo.java   |   7 +
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  11 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   6 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  15 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  58 +++-
 .../server/datanode/TestDirectoryScanner.java   |   8 +
 .../fsdataset/impl/TestRbwSpaceReservation.java | 288 +++
 .../fsdataset/impl/TestWriteToReplica.java  |   2 +-
 12 files changed, 423 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1fa5829/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4412b30..3d43171 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -612,6 +612,9 @@ Release 2.6.0 - UNRELEASED
 HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
 Arpit Agarwal)
 
+HDFS-6898. DN must reserve space for a full block when an RBW block is
+created. (Arpit Agarwal)
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1fa5829/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 77fe543..240dcd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -48,7 +48,7 @@ public class HdfsConstants {
   "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
   
   
-  public static final int MIN_BLOCKS_FOR_WRITE = 5;
+  public static final int MIN_BLOCKS_FOR_WRITE = 1;
 
   // Long that indicates "leave current quota unchanged"
   public static final long QUOTA_DONT_SET = Long.MAX_VALUE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1fa5829/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
index 728dd38..4a89493 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
@@ -34,10 +34,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
+   * @param bytesToReserve disk space to reserve for this replica, based on
+   *   the estimated maximum block length.
*/
   public ReplicaBeingWritten(long blockId, long genStamp, 
-FsVolumeSpi vol, File dir) {
-super( blockId, genStamp, vol, dir);
+FsVolumeSpi vol, File dir, long bytesToReserve) {
+super(blockId, genStamp, vol, dir, bytesToReserve);
   }
   
   /**
@@ -60,10 +62,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
+   * @
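
For context on the reservation scheme, a hedged sketch of the accounting this commit introduces. The method names are paraphrased from the diffstat (FsVolumeImpl); treat the exact signatures as an assumption.

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative only; the real accounting lives in FsVolumeImpl.
    class RbwSpaceAccountingSketch {
      private final AtomicLong reservedForRbw = new AtomicLong(0);

      void reserveSpaceForRbw(long bytesToReserve) {   // on RBW creation
        reservedForRbw.addAndGet(bytesToReserve);
      }

      void releaseReservedSpace(long bytesToRelease) { // as bytes land / on finalize
        reservedForRbw.addAndGet(-bytesToRelease);
      }

      long getAvailable(long rawDiskAvailable) {
        // Discount outstanding reservations so many concurrent writers
        // cannot collectively over-commit the volume.
        return Math.max(0L, rawDiskAvailable - reservedForRbw.get());
      }
    }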

[11/16] git commit: HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to determine if in proxyuser mode or not. (tucu)

2014-09-06 Thread seanzhong
HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to 
determine if in proxyuser mode or not. (tucu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f3c19c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f3c19c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f3c19c1

Branch: refs/heads/MR-2841
Commit: 0f3c19c1bb9e341d8aed132ba3eb9e7fc7588306
Parents: 71c8d73
Author: Alejandro Abdelnur 
Authored: Fri Sep 5 10:04:07 2014 -0700
Committer: Alejandro Abdelnur 
Committed: Fri Sep 5 21:59:12 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt| 3 +++
 .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java| 6 +++---
 .../java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java  | 6 +++---
 3 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3c19c1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9aef131..c77fddc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.6.0 - UNRELEASED
 HADOOP-11067. warning message 'ssl.client.truststore.location has not
 been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)
 
+HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to
+determine if in proxyuser mode or not. (tucu)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3c19c1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index a4e336c..acbe096 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -385,9 +385,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   // if current UGI is different from UGI at constructor time, behave as
   // proxyuser
   UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-  final String doAsUser =
-  (loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
-  ? null : currentUgi.getShortUserName();
+  final String doAsUser = (currentUgi.getAuthenticationMethod() ==
+  UserGroupInformation.AuthenticationMethod.PROXY)
+  ? currentUgi.getShortUserName() : null;
 
   // creating the HTTP connection using the current UGI at constructor time
  conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
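
For context: a UGI built by UserGroupInformation.createProxyUser reports AuthenticationMethod.PROXY, whereas a fresh keytab login does not, even when short user names differ, so keying on the authentication method is what reliably identifies a real proxy. A minimal sketch of the distinction the patch relies on:

    // A proxy UGI created like this is what the new check detects:
    UserGroupInformation real = UserGroupInformation.getLoginUser();
    UserGroupInformation proxy =
        UserGroupInformation.createProxyUser("foo", real);
    assert proxy.getAuthenticationMethod()
        == UserGroupInformation.AuthenticationMethod.PROXY;
    // real.getAuthenticationMethod() still reflects the login (e.g.
    // KERBEROS), even if another UGI happens to share its short name.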

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3c19c1/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index f381fa0..b921c84 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -1157,7 +1157,7 @@ public class TestKMS {
 final URI uri = createKMSUri(getKMSUrl());
 
 // proxyuser client using kerberos credentials
-UserGroupInformation clientUgi = UserGroupInformation.
+final UserGroupInformation clientUgi = UserGroupInformation.
 loginUserFromKeytabAndReturnUGI("client", 
keytab.getAbsolutePath());
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
   @Override
@@ -1167,7 +1167,7 @@ public class TestKMS {
 
 // authorized proxyuser
 UserGroupInformation fooUgi =
-UserGroupInformation.createRemoteUser("foo");
+UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
   @Override
   public Void run() throws Exception {
@@ -1179,7 +1179,7 @@ public class TestKMS {
 
 // unauthorized proxyuser
  

[12/16] git commit: HDFS-6986. DistributedFileSystem must get delegation tokens from configured KeyProvider. (zhz via tucu)

2014-09-06 Thread seanzhong
HDFS-6986. DistributedFileSystem must get delegation tokens from configured 
KeyProvider. (zhz via tucu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b35f816
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b35f816
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b35f816

Branch: refs/heads/MR-2841
Commit: 3b35f81603bbfae119762b50bcb46de70a421368
Parents: 0f3c19c
Author: Alejandro Abdelnur 
Authored: Fri Sep 5 22:33:48 2014 -0700
Committer: Alejandro Abdelnur 
Committed: Fri Sep 5 22:33:48 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++
 .../hadoop/hdfs/DistributedFileSystem.java  | 24 +++
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 43 
 4 files changed, 74 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b35f816/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0772ea6..333bdce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -711,6 +711,9 @@ Release 2.6.0 - UNRELEASED
   HDFS-6714. TestBlocksScheduledCounter#testBlocksScheduledCounter should
   shutdown cluster (vinayakumarb)
 
+  HDFS-6986. DistributedFileSystem must get delegation tokens from 
configured 
+  KeyProvider. (zhz via tucu)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b35f816/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8daf912..e4215f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3084,4 +3084,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   DFSHedgedReadMetrics getHedgedReadMetrics() {
 return HEDGED_READ_METRIC;
   }
+
+  public KeyProviderCryptoExtension getKeyProvider() {
+return provider;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b35f816/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index bf7d62e..dbdf5c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -84,8 +84,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -1946,6 +1948,28 @@ public class DistributedFileSystem extends FileSystem {
 }.resolve(this, absF);
   }
 
+  @Override
+  public Token<?>[] addDelegationTokens(
+      final String renewer, Credentials credentials) throws IOException {
+    Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
+    if (dfs.getKeyProvider() != null) {
+      KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
+          KeyProviderDelegationTokenExtension.
+              createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
+      Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
+          addDelegationTokens(renewer, credentials);
+      if (tokens != null && kpTokens != null) {
+        Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
+        System.arraycopy(tokens, 0, all, 0, tokens.length);
+        System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
+        tokens = all;
+      } else {
+        tokens = (tokens != null) ? tokens : kpTokens;
+      }
+    }
+    return tokens;
+  }
+
   public DFSInotifyEventInputStream getInotifyEvent
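
For context, a hedged usage sketch (the helper class is hypothetical, and "kms-dt" as the KMS token kind is my assumption): with this override, one addDelegationTokens call collects the KMS delegation token alongside the HDFS one, so a job reading encryption zones needs no separate KMS handshake.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    public class TokenFetchDemo {
      // Collects tokens for a renewer; with this patch the returned array
      // also contains the KMS token when a KeyProvider is configured.
      static void collect(FileSystem fs, String renewer) throws IOException {
        Credentials creds = new Credentials();
        Token<?>[] tokens = fs.addDelegationTokens(renewer, creds);
        for (Token<?> t : tokens) {
          // expect HDFS_DELEGATION_TOKEN, plus kms-dt for encryption zones
          System.out.println(t.getKind() + " for " + t.getService());
        }
      }
    }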

[05/16] git commit: HDFS-6998. warning message 'ssl.client.truststore.location has not been set' gets printed for hftp command. (Contributed by Xiaoyu Yao)

2014-09-06 Thread seanzhong
HDFS-6998. warning message 'ssl.client.truststore.location has not been set' 
gets printed for hftp command. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71269f70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71269f70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71269f70

Branch: refs/heads/MR-2841
Commit: 71269f70971dc7aa7bcb5e78b19cb3f04fdaa2f4
Parents: 9609b73
Author: arp 
Authored: Fri Sep 5 11:14:10 2014 -0700
Committer: arp 
Committed: Fri Sep 5 11:14:10 2014 -0700

--
 .../org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71269f70/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
index aabb815..4b81e17 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
@@ -212,7 +212,7 @@ public class FileBasedKeyStoresFactory implements 
KeyStoresFactory {
   LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
   trustManagers = new TrustManager[]{trustManager};
 } else {
-  LOG.warn("The property '" + locationProperty + "' has not been set, " +
+  LOG.debug("The property '" + locationProperty + "' has not been set, " +
   "no TrustStore will be loaded");
   trustManagers = null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71269f70/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0772ea6..5c4aeea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -610,6 +610,9 @@ Release 2.6.0 - UNRELEASED
 HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
 Arpit Agarwal)
 
+HDFS-6998. warning message 'ssl.client.truststore.location has not been
+set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an



[08/16] git commit: HADOOP-11067 [HDFS-6998]. Fix CHANGES.txt

2014-09-06 Thread seanzhong
HADOOP-11067 [HDFS-6998]. Fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21c0cdee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21c0cdee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21c0cdee

Branch: refs/heads/MR-2841
Commit: 21c0cdeec1034b18ad3a2d5b71941a84bcea5ebe
Parents: 0571b45
Author: arp 
Authored: Fri Sep 5 11:18:20 2014 -0700
Committer: arp 
Committed: Fri Sep 5 14:29:57 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ---
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21c0cdee/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d20bf08..88804cd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -760,6 +760,9 @@ Release 2.6.0 - UNRELEASED
 HADOOP-11063. KMS cannot deploy on Windows, because class names are too 
long.
 (cnauroth)
 
+HADOOP-11067. warning message 'ssl.client.truststore.location has not
+been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21c0cdee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5c4aeea..0772ea6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -610,9 +610,6 @@ Release 2.6.0 - UNRELEASED
 HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
 Arpit Agarwal)
 
-HDFS-6998. warning message 'ssl.client.truststore.location has not been
-set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)
-
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an



[10/16] git commit: HADOOP-11070. Create MiniKMS for testing. (tucu)

2014-09-06 Thread seanzhong
HADOOP-11070. Create MiniKMS for testing. (tucu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71c8d735
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71c8d735
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71c8d735

Branch: refs/heads/MR-2841
Commit: 71c8d735f5038e3b516947f12180d7568b6979dc
Parents: e6420fe
Author: Alejandro Abdelnur 
Authored: Fri Sep 5 14:09:22 2014 -0700
Committer: Alejandro Abdelnur 
Committed: Fri Sep 5 21:59:12 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 hadoop-common-project/hadoop-kms/pom.xml|   4 +-
 .../hadoop/crypto/key/kms/server/MiniKMS.java   | 197 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   |  82 +---
 4 files changed, 211 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c8d735/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 88804cd..9aef131 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -507,6 +507,8 @@ Release 2.6.0 - UNRELEASED
 HADOOP-11060. Create a CryptoCodec test that verifies interoperability 
 between the JCE and OpenSSL implementations. (hitliuyi via tucu)
 
+HADOOP-11070. Create MiniKMS for testing. (tucu)
+
   OPTIMIZATIONS
 
 HADOOP-10838. Byte array native checksumming. (James Thomas via todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c8d735/hadoop-common-project/hadoop-kms/pom.xml
--
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index 3bb97c5..629ffda 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -222,9 +222,9 @@
 
 
   
-
+
 
-
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c8d735/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
new file mode 100644
index 000..5a6d4c5
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.security.SslSocketConnector;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.Writer;
+import java.net.InetAddress;
+import java.net.MalformedURLException;
+import java.net.ServerSocket;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+public class MiniKMS {
+
+  private static Server createJettyServer(String keyStore, String password) {
+try {
+  boolean ssl = keyStore != null;
+  InetAddress localhost = InetAddress.getByName("localhost");
+  String host = "localhost";
+  ServerSocket ss = new ServerSocket(0, 50, localhost);
+  int port = ss.getLocalPort();
+  ss.close();
+  Server server = new Server(0);
+  if (!ssl) {
+server.getConnectors()[0].setHost(host);
+server.getConnectors()[0].setPort(port);
+  } else {
+SslSocketConnector c = new SslSocketConnector();

[01/16] git commit: HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'. (Contributed by Xiaoyu Yao)

2014-09-06 Thread seanzhong
Repository: hadoop
Updated Branches:
  refs/heads/MR-2841 52a8b4db9 -> b160707ac


HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'. 
(Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e941d9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e941d9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e941d9f

Branch: refs/heads/MR-2841
Commit: 9e941d9f99168cae01f8d50622a616fc26c196d9
Parents: c6107f5
Author: arp 
Authored: Fri Sep 5 10:45:54 2014 -0700
Committer: arp 
Committed: Fri Sep 5 10:45:54 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 175 ---
 .../java/org/apache/hadoop/tools/TestTools.java |   2 +-
 3 files changed, 81 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e941d9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d4059de..5a087d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -602,6 +602,9 @@ Release 2.6.0 - UNRELEASED
 HDFS-6996. SnapshotDiff report can hit IndexOutOfBoundsException when there
 are nested renamed directory/file. (jing9)
 
+HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'.
+(Xiaoyu Yao via Arpit Agarwal)
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e941d9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index ad7be18..13ccae5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -353,6 +353,40 @@ public class DFSAdmin extends FsShell {
   }
 
   /**
+   * Common usage summary shared between "hdfs dfsadmin -help" and
+   * "hdfs dfsadmin"
+   */
+  private static final String commonUsageSummary =
+"\t[-report [-live] [-dead] [-decommissioning]]\n" +
+"\t[-safemode ]\n" +
+"\t[-saveNamespace]\n" +
+"\t[-rollEdits]\n" +
+"\t[-restoreFailedStorage true|false|check]\n" +
+"\t[-refreshNodes]\n" +
+"\t[" + SetQuotaCommand.USAGE + "]\n" +
+"\t[" + ClearQuotaCommand.USAGE +"]\n" +
+"\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
+"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
+"\t[-finalizeUpgrade]\n" +
+"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
+"\t[-refreshServiceAcl]\n" +
+"\t[-refreshUserToGroupsMappings]\n" +
+"\t[-refreshSuperUserGroupsConfiguration]\n" +
+"\t[-refreshCallQueue]\n" +
+"\t[-refresh   [arg1..argn]\n" +
+"\t[-printTopology]\n" +
+"\t[-refreshNamenodes datanode_host:ipc_port]\n"+
+"\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+
+"\t[-setBalancerBandwidth ]\n" +
+"\t[-fetchImage ]\n" +
+"\t[-allowSnapshot ]\n" +
+"\t[-disallowSnapshot ]\n" +
+"\t[-shutdownDatanode  [upgrade]]\n" +
+"\t[-getDatanodeInfo ]\n" +
+"\t[-metasave filename]\n" +
+"\t[-help [cmd]]\n";
+
+  /**
* Construct a DFSAdmin object.
*/
   public DFSAdmin() {
@@ -589,7 +623,7 @@ public class DFSAdmin extends FsShell {
   
   /**
* Command to ask the namenode to save the namespace.
-   * Usage: java DFSAdmin -saveNamespace
+   * Usage: hdfs dfsadmin -saveNamespace
* @exception IOException 
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
*/
@@ -630,7 +664,7 @@ public class DFSAdmin extends FsShell {
   
   /**
* Command to enable/disable/check restoring of failed storage replicas in 
the namenode.
-   * Usage: java DFSAdmin -restoreFailedStorage true|false|check
+   * Usage: hdfs dfsadmin -restoreFailedStorage true|false|check
* @exception IOException 
* @see 
org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
*/
@@ -668,7 +702,7 @@ public class DFSAdmin extends FsShell {
   /**
* Command to ask the namenode to reread the hosts and excluded hosts 
* file.
-   * Usage: java DFSAdmin -refreshNodes
+   * Usage: hdfs dfsadmin -refreshNodes
* @exception IOException 
*/
   public int refreshNodes(

[04/16] git commit: HDFS-6862. Add missing timeout annotations to tests. (Contributed by Xiaoyu Yao)

2014-09-06 Thread seanzhong
HDFS-6862. Add missing timeout annotations to tests. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9609b730
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9609b730
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9609b730

Branch: refs/heads/MR-2841
Commit: 9609b7303a98c8eff676c5a086b08b1ca9ab777c
Parents: b051327
Author: arp 
Authored: Fri Sep 5 11:08:03 2014 -0700
Committer: arp 
Committed: Fri Sep 5 11:10:58 2014 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../apache/hadoop/hdfs/TestHDFSServerPorts.java | 24 
 .../TestValidateConfigurationSettings.java  | 16 ++---
 .../namenode/ha/TestDelegationTokensWithHA.java | 63 
 .../hdfs/server/namenode/ha/TestHAMetrics.java  | 10 ++--
 .../namenode/ha/TestHAStateTransitions.java | 52 +++-
 .../namenode/ha/TestStandbyCheckpoints.java | 54 +++--
 7 files changed, 97 insertions(+), 125 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9609b730/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7b8917b..0772ea6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -607,6 +607,9 @@ Release 2.6.0 - UNRELEASED
 
 HDFS-6979. hdfs.dll does not produce .pdb files. (cnauroth)
 
+HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
+Arpit Agarwal)
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
   HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9609b730/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 59d1615..ce8a4e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.UnknownHostException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +31,14 @@ import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
+import java.net.UnknownHostException;
+
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 /**
  * This test checks correctness of port usage by hdfs components:
  * NameNode, DataNode, SecondaryNamenode and BackupNode.
@@ -245,7 +245,7 @@ public class TestHDFSServerPorts {
 return true;
   }
 
-  @Test
+  @Test(timeout = 300000)
   public void testNameNodePorts() throws Exception {
 runTestNameNodePorts(false);
 runTestNameNodePorts(true);
@@ -296,7 +296,7 @@ public class TestHDFSServerPorts {
   /**
* Verify datanode port usage.
*/
-  @Test
+  @Test(timeout = 300000)
   public void testDataNodePorts() throws Exception {
 NameNode nn = null;
 try {
@@ -332,7 +332,7 @@ public class TestHDFSServerPorts {
   /**
* Verify secondary namenode port usage.
*/
-  @Test
+  @Test(timeout = 300000)
   public void testSecondaryNodePorts() throws Exception {
 NameNode nn = null;
 try {
@@ -361,7 +361,7 @@ public class TestHDFSServerPorts {
 /**
  * Verify BackupNode port usage.
  */
-  @Test
+@Test(timeout = 300000)
 public void testBackupNodePorts() throws Exception {
   NameNode nn = null;
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9609b730/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/s

[07/16] git commit: HADOOP-11065. Rat check should exclude **/build/**. (kasha)

2014-09-06 Thread seanzhong
HADOOP-11065. Rat check should exclude **/build/**. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0571b456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0571b456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0571b456

Branch: refs/heads/MR-2841
Commit: 0571b4561bad7e0230920e52d3758a3658fcf20d
Parents: 7a62515
Author: Karthik Kambatla 
Authored: Fri Sep 5 13:08:59 2014 -0700
Committer: Karthik Kambatla 
Committed: Fri Sep 5 13:09:10 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 pom.xml | 1 +
 2 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0571b456/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index afd1cc7..d20bf08 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -777,6 +777,8 @@ Release 2.5.1 - UNRELEASED
 
 HADOOP-11001. Fix test-patch to work with the git repo. (kasha)
 
+HADOOP-11065. Rat check should exclude "**/build/**". (kasha)
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0571b456/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a4f8241..5cc30c2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -324,6 +324,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 .gitignore
 .git/**
 .idea/**
+   **/build/**
  

   



[14/16] git commit: YARN-2519. Credential Provider related unit tests failed on Windows. Contributed by Xiaoyu Yao.

2014-09-06 Thread seanzhong
YARN-2519. Credential Provider related unit tests failed on Windows. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbea1b10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbea1b10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbea1b10

Branch: refs/heads/MR-2841
Commit: cbea1b10efd871d04c648af18449dc724685db74
Parents: 88209ce
Author: cnauroth 
Authored: Sat Sep 6 20:05:07 2014 -0700
Committer: cnauroth 
Committed: Sat Sep 6 20:05:07 2014 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbea1b10/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 34a206a..beafc22 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -287,6 +287,9 @@ Release 2.6.0 - UNRELEASED
 YARN-2431. NM restart: cgroup is not removed for reacquired containers
 (jlowe)
 
+YARN-2519. Credential Provider related unit tests failed on Windows.
+(Xiaoyu Yao via cnauroth)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbea1b10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
index 18600fd..2bd91b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.File;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServer2.Builder;
 import org.apache.hadoop.security.alias.CredentialProvider;
@@ -74,8 +75,9 @@ public class TestWebAppUtils {
 "target/test-dir"));
 
 Configuration conf = new Configuration();
+final Path jksPath = new Path(testDir.toString(), "test.jks");
 final String ourUrl =
-JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
+JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
 
 File file = new File(testDir, "test.jks");
 file.delete();
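
For context, a hedged standalone sketch (paths assumed) of why the Path detour fixes the Windows failure: embedding a raw java.io.File string in a URI breaks on Windows, where "C:\..." is not a legal URI path, while Path normalizes it to a "/C:/..." form on every platform.

    import java.io.File;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.alias.JavaKeyStoreProvider;

    public class KeystoreUriSketch {
      public static void main(String[] args) {
        File testDir = new File("target/test-dir"); // assumed location
        // String concatenation would inject backslashes on Windows;
        // Path.toUri() yields a normalized, portable path instead.
        Path jksPath = new Path(testDir.toString(), "test.jks");
        String ourUrl =
            JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
        System.out.println(ourUrl); // e.g. jceks://file/C:/.../test.jks
      }
    }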



[06/16] git commit: HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked in bin/hdfs (aw)

2014-09-06 Thread seanzhong
HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked in bin/hdfs 
(aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a62515c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a62515c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a62515c

Branch: refs/heads/MR-2841
Commit: 7a62515c8628430a163415e42c9526a123db213c
Parents: 71269f7
Author: Allen Wittenauer 
Authored: Fri Sep 5 11:31:49 2014 -0700
Committer: Allen Wittenauer 
Committed: Fri Sep 5 11:31:49 2014 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../hadoop-common/src/main/bin/hadoop-functions.sh   |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs| 15 +++
 3 files changed, 12 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a62515c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d38fae9..afd1cc7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -330,6 +330,9 @@ Trunk (Unreleased)
 
 HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw)
 
+HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked 
+in bin/hdfs (aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a62515c/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index d430188..1677cc0 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -644,9 +644,9 @@ function hadoop_verify_secure_prereq
   # this.
   
   # ${EUID} comes from the shell itself!
-  if [[ "${EUID}" -ne 0 ]] || [[ -n "${HADOOP_SECURE_COMMAND}" ]]; then
+  if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
 hadoop_error "ERROR: You must be a privileged in order to run a secure 
serice."
-return 1
+exit 1
   else
 return 0
   fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a62515c/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 6872a0e..2300dbf 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -225,14 +225,13 @@ esac
 
 if [[ -n "${secure_service}" ]]; then
   HADOOP_SECURE_USER="${secure_user}"
-  if hadoop_verify_secure_prereq; then
-    hadoop_setup_secure_service
-    priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out"
-    priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err"
-    priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid"
-    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
-    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
-  fi
+  hadoop_verify_secure_prereq
+  hadoop_setup_secure_service
+  priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out"
+  priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err"
+  priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid"
+  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
+  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
 else
   daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
   daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"



[16/16] git commit: Merge branch 'trunk' into MR-2841

2014-09-06 Thread seanzhong
Merge branch 'trunk' into MR-2841


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b160707a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b160707a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b160707a

Branch: refs/heads/MR-2841
Commit: b160707ace7900f360fa914cd291675c1d9703ec
Parents: 52a8b4d d1fa582
Author: Sean Zhong 
Authored: Sun Sep 7 13:01:27 2014 +0800
Committer: Sean Zhong 
Committed: Sun Sep 7 13:01:27 2014 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  13 +
 .../src/main/bin/hadoop-functions.sh|   4 +-
 .../crypto/key/kms/KMSClientProvider.java   |   6 +-
 .../security/ssl/FileBasedKeyStoresFactory.java |   2 +-
 hadoop-common-project/hadoop-kms/pom.xml|   4 +-
 .../hadoop/crypto/key/kms/server/MiniKMS.java   | 197 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   |  88 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  16 ++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   6 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |  15 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +
 .../hadoop/hdfs/DistributedFileSystem.java  |  24 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../server/blockmanagement/BlockManager.java|  23 +-
 .../server/blockmanagement/DatanodeManager.java |   6 +-
 .../server/blockmanagement/HostFileManager.java |   4 +
 .../server/datanode/ReplicaBeingWritten.java|  12 +-
 .../hdfs/server/datanode/ReplicaInPipeline.java |  33 ++-
 .../hdfs/server/datanode/ReplicaInfo.java   |   7 +
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  11 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   6 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  15 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  58 +++-
 .../hdfs/server/namenode/FSNamesystem.java  |  46 +--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 175 +--
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  43 +++
 .../apache/hadoop/hdfs/TestHDFSServerPorts.java |  24 +-
 .../server/datanode/TestDirectoryScanner.java   |   8 +
 .../fsdataset/impl/TestRbwSpaceReservation.java | 288 +++
 .../fsdataset/impl/TestWriteToReplica.java  |   2 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   2 +-
 .../TestValidateConfigurationSettings.java  |  16 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |  63 ++--
 .../hdfs/server/namenode/ha/TestHAMetrics.java  |  10 +-
 .../namenode/ha/TestHAStateTransitions.java |  52 ++--
 .../namenode/ha/TestStandbyCheckpoints.java |  54 ++--
 .../java/org/apache/hadoop/tools/TestTools.java |   2 +-
 hadoop-yarn-project/CHANGES.txt |   6 +
 .../yarn/webapp/util/TestWebAppUtils.java   |   4 +-
 .../webapp/CrossOriginFilterInitializer.java|  12 +-
 .../TestCrossOriginFilterInitializer.java   |   7 +-
 pom.xml |   1 +
 42 files changed, 998 insertions(+), 373 deletions(-)
--