hadoop git commit: HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. Contributed by Ming Ma. (cherry picked from commit 8104d522690fe9556177893770a388291cea0749)

2015-10-27 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 653ef52ef -> 1d23e1ec0


HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. 
Contributed by Ming Ma.
(cherry picked from commit 8104d522690fe9556177893770a388291cea0749)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d23e1ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d23e1ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d23e1ec

Branch: refs/heads/branch-2.7
Commit: 1d23e1ec073489bfc8a04a08350a2c46efbd466f
Parents: 653ef52
Author: Kihwal Lee 
Authored: Tue Oct 27 11:09:05 2015 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 27 11:09:05 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/DecommissionManager.java| 28 +--
 .../blockmanagement/HeartbeatManager.java   | 29 ++--
 .../namenode/TestNamenodeCapacityReport.java|  5 
 4 files changed, 41 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d23e1ec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 50bc0c4..7c09896 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -28,6 +28,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-6945. BlockManager should remove a block from excessReplicateMap and
 decrement ExcessBlocks metric when the block is removed. (aajisaka)
 
+HDFS-7725. Incorrect "nodes in service" metrics caused all writes to fail.
+(Ming Ma via wang)
+
 HDFS-8806. Inconsistent metrics: number of missing blocks with replication
 factor 1 not properly cleared. (Zhe Zhang via aajisaka)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d23e1ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 9355329..7f3d778 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -197,23 +197,21 @@ public class DecommissionManager {
*/
   @VisibleForTesting
   public void startDecommission(DatanodeDescriptor node) {
-if (!node.isDecommissionInProgress()) {
-  if (!node.isAlive) {
-LOG.info("Dead node {} is decommissioned immediately.", node);
-node.setDecommissioned();
-  } else if (!node.isDecommissioned()) {
+if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
+  // Update DN stats maintained by HeartbeatManager
+  hbManager.startDecommission(node);
+  // hbManager.startDecommission will set dead node to decommissioned.
+  if (node.isDecommissionInProgress()) {
 for (DatanodeStorageInfo storage : node.getStorageInfos()) {
-  LOG.info("Starting decommission of {} {} with {} blocks", 
+  LOG.info("Starting decommission of {} {} with {} blocks",
   node, storage, storage.numBlocks());
 }
-// Update DN stats maintained by HeartbeatManager
-hbManager.startDecommission(node);
 node.decommissioningStatus.setStartTime(monotonicNow());
 pendingNodes.add(node);
   }
 } else {
-  LOG.trace("startDecommission: Node {} is already decommission in "
-  + "progress, nothing to do.", node);
+  LOG.trace("startDecommission: Node {} in {}, nothing to do." +
+  node, node.getAdminState());
 }
   }
 
@@ -221,12 +219,12 @@ public class DecommissionManager {
* Stop decommissioning the specified datanode. 
* @param node
*/
-  void stopDecommission(DatanodeDescriptor node) {
+  @VisibleForTesting
+  public void stopDecommission(DatanodeDescriptor node) {
 if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-  LOG.info("Stopping decommissioning of node {}", node);
   // Update DN stats maintained by HeartbeatManager
   hbManager.stopDecommission(node);
-  // Over-replicated blocks will be detected and processed when 
+  // Over-replicated blocks will be detected and process
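
The heart of this fix is the reordering visible above: hbManager.startDecommission(node) now runs before the admin state is inspected, so the HeartbeatManager stays the single place where the "nodes in service" count and the node's admin state change together, and a dead node is moved straight to DECOMMISSIONED there rather than being flipped outside the stats bookkeeping. A minimal, self-contained sketch of that ordering in plain Java (hypothetical simplified types, not the actual Hadoop classes):

import java.util.ArrayList;
import java.util.List;

/** Minimal sketch of the corrected startDecommission ordering (hypothetical simplified types). */
public class DecommissionSketch {

  enum AdminState { IN_SERVICE, DECOMMISSION_INPROGRESS, DECOMMISSIONED }

  static class Node {
    final String name;
    final boolean alive;
    AdminState state = AdminState.IN_SERVICE;
    Node(String name, boolean alive) { this.name = name; this.alive = alive; }
  }

  /** Stand-in for the stats the HeartbeatManager maintains. */
  static class Stats {
    int nodesInService;

    void register(Node n) {
      if (n.alive && n.state == AdminState.IN_SERVICE) {
        nodesInService++;
      }
    }

    // The single place where the in-service count and the admin state change together.
    void startDecommission(Node n) {
      if (n.alive && n.state == AdminState.IN_SERVICE) {
        nodesInService--;
      }
      // A dead node is moved straight to DECOMMISSIONED, as the patch's comment notes.
      n.state = n.alive ? AdminState.DECOMMISSION_INPROGRESS : AdminState.DECOMMISSIONED;
    }
  }

  final Stats stats = new Stats();
  final List<Node> pendingNodes = new ArrayList<>();

  void startDecommission(Node node) {
    if (node.state == AdminState.IN_SERVICE) {
      stats.startDecommission(node);                           // update stats first
      if (node.state == AdminState.DECOMMISSION_INPROGRESS) {
        pendingNodes.add(node);                                // only live nodes need block scanning
      }
    }
  }

  public static void main(String[] args) {
    DecommissionSketch dm = new DecommissionSketch();
    Node live = new Node("dn1", true);
    Node dead = new Node("dn2", false);
    dm.stats.register(live);
    dm.stats.register(dead);
    dm.startDecommission(dead);   // dead: marked decommissioned, stats untouched, not queued
    dm.startDecommission(live);   // live: leaves the in-service count, queued for scanning
    System.out.println("nodesInService=" + dm.stats.nodesInService
        + " pending=" + dm.pendingNodes.size());   // nodesInService=0 pending=1
  }
}

Running the sketch leaves nodesInService at 0 and only the live node in the pending queue, which is the invariant the patch restores.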

hadoop git commit: HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in the allowed list. Contributed by Daniel Templeton.

2015-10-27 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 1d23e1ec0 -> 59a207213


HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in the 
allowed list.  Contributed by  Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59a20721
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59a20721
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59a20721

Branch: refs/heads/branch-2.7
Commit: 59a207213597f1dc94afc9d22e693165d6fd2792
Parents: 1d23e1e
Author: Kihwal Lee 
Authored: Tue Oct 27 16:42:33 2015 -0500
Committer: Kihwal Lee 
Committed: Tue Oct 27 16:42:33 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/DatanodeManager.java |   9 +-
 .../server/blockmanagement/HostFileManager.java |  19 
 .../apache/hadoop/hdfs/TestDecommission.java|  15 +--
 .../blockmanagement/TestDatanodeManager.java| 110 ++-
 .../blockmanagement/TestHostFileManager.java|   7 +-
 6 files changed, 146 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a20721/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7c09896..9b28d3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -47,6 +47,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8879. Quota by storage type usage incorrectly initialized upon 
namenode
 restart. (xyao)
 
+HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in
+the allowed list (Daniel Templeton)
+
 HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
 (Kihwal Lee via yliu)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a20721/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index d7e0721..0cf1eee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1272,11 +1272,14 @@ public class DatanodeManager {
   for (DatanodeDescriptor dn : datanodeMap.values()) {
 final boolean isDead = isDatanodeDead(dn);
 final boolean isDecommissioning = dn.isDecommissionInProgress();
-if ((listLiveNodes && !isDead) ||
+
+if (((listLiveNodes && !isDead) ||
 (listDeadNodes && isDead) ||
-(listDecommissioningNodes && isDecommissioning)) {
-nodes.add(dn);
+(listDecommissioningNodes && isDecommissioning)) &&
+hostFileManager.isIncluded(dn)) {
+  nodes.add(dn);
 }
+
 foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));
   }
 }
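
The DatanodeManager change above adds hostFileManager.isIncluded(dn) to the listing condition, so after a refresh a DataNode that was dropped from the include file no longer shows up in the live/dead/decommissioning reports. A small, self-contained sketch of the combined predicate (hypothetical simplified types, not the real DatanodeManager or HostFileManager; the empty-includes-means-allow-all rule is an assumption based on HostFileManager's usual semantics):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Sketch of node-report filtering: liveness/admin state AND include-list membership. */
public class NodeReportSketch {

  static class Dn {
    final String host;
    final boolean dead;
    final boolean decommissioning;
    Dn(String host, boolean dead, boolean decommissioning) {
      this.host = host;
      this.dead = dead;
      this.decommissioning = decommissioning;
    }
  }

  /** Stand-in for HostFileManager.isIncluded(dn); empty include list taken to mean "allow all". */
  static boolean isIncluded(Set<String> includes, Dn dn) {
    return includes.isEmpty() || includes.contains(dn.host);
  }

  static List<Dn> listNodes(List<Dn> all, Set<String> includes,
      boolean listLive, boolean listDead, boolean listDecommissioning) {
    List<Dn> nodes = new ArrayList<>();
    for (Dn dn : all) {
      boolean wanted = (listLive && !dn.dead)
          || (listDead && dn.dead)
          || (listDecommissioning && dn.decommissioning);
      // The HDFS-8950 change: also require membership in the (refreshed) include list.
      if (wanted && isIncluded(includes, dn)) {
        nodes.add(dn);
      }
    }
    return nodes;
  }

  public static void main(String[] args) {
    List<Dn> all = List.of(new Dn("h1", false, false), new Dn("h2", false, false));
    Set<String> includes = new HashSet<>(Set.of("h1"));   // h2 was removed from the allowed list
    System.out.println(listNodes(all, includes, true, false, false).size());   // prints 1
  }
}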

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a20721/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index 0b8d6c5..e05ef9a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -126,9 +126,28 @@ class HostFileManager {
 return !includes.isEmpty();
   }
 
+  /**
+   * Read the includes and excludes lists from the named files.  Any previous
+   * includes and excludes lists are discarded.
+   * @param includeFile the path to the new includes list
+   * @param excludeFile the path to the new excludes list
+   * @throws IOException thrown if there is a problem reading one of the files
+   */
   void refresh(String includeFile, String excludeFile) throws IOException {
 HostSet newIncludes = readFile("included", includeFile);
 HostSet newExcludes = readFile("excluded", excludeFile);
+
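
Only the first lines of the new refresh() body survive the truncation above, but the visible shape is: read both files into fresh HostSet objects before touching the current ones, so a failed read leaves the previous includes/excludes intact. A hedged sketch of that read-then-swap pattern with plain Java collections (the final swap step and the one-host-per-line format are assumptions, not the actual HostFileManager code):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Sketch of a read-both-then-swap host list refresh (not the real HostFileManager). */
public class HostListsSketch {

  private volatile Set<String> includes = new HashSet<>();
  private volatile Set<String> excludes = new HashSet<>();

  private static Set<String> readHosts(String file) throws IOException {
    Set<String> hosts = new HashSet<>();
    for (String line : Files.readAllLines(Paths.get(file))) {
      String host = line.trim();
      if (!host.isEmpty()) {
        hosts.add(host);
      }
    }
    return hosts;
  }

  /** Read both files first; replace the current lists only after both reads succeed. */
  synchronized void refresh(String includeFile, String excludeFile) throws IOException {
    Set<String> newIncludes = readHosts(includeFile);
    Set<String> newExcludes = readHosts(excludeFile);
    includes = newIncludes;   // assumed swap step; the original diff is cut off above
    excludes = newExcludes;
  }

  boolean isIncluded(String host) {
    return includes.isEmpty() || includes.contains(host);
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical one-host-per-line files created just for the demo.
    Path inc = Files.write(Files.createTempFile("hosts.include", ""), List.of("dn1.example.com"));
    Path exc = Files.createTempFile("hosts.exclude", "");
    HostListsSketch hosts = new HostListsSketch();
    hosts.refresh(inc.toString(), exc.toString());
    System.out.println(hosts.isIncluded("dn1.example.com"));   // true
    System.out.println(hosts.isIncluded("dn2.example.com"));   // false
  }
}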

hadoop git commit: HDFS-9279. Decomissioned capacity should not be considered for configured/used capacity. Contributed by Kuhu Shukla.

2015-10-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2d10cb8e0 -> 19a77f546


HDFS-9279. Decomissioned capacity should not be considered for configured/used 
capacity. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19a77f54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19a77f54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19a77f54

Branch: refs/heads/trunk
Commit: 19a77f546657b086af8f41fa631099bdde7e010c
Parents: 2d10cb8
Author: Kihwal Lee 
Authored: Wed Oct 28 11:57:56 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 28 11:58:51 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeStats.java   | 26 ++-
 .../apache/hadoop/hdfs/TestDecommission.java| 47 +---
 3 files changed, 58 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 184b743..7f903b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2176,6 +2176,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9302. WebHDFS throws NullPointerException if newLength is not
 provided. (Jagadesh Kiran N via yliu)
 
+HDFS-9297. Decomissioned capacity should not be considered for 
+configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 3ab0d5c..4c39c41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-capacityUsed += node.getDfsUsed();
-blockPoolUsed += node.getBlockPoolUsed();
 xceiverCount += node.getXceiverCount();
 if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+  capacityUsed += node.getDfsUsed();
+  blockPoolUsed += node.getBlockPoolUsed();
   nodesInService++;
   nodesInServiceXceiverCount += node.getXceiverCount();
   capacityTotal += node.getCapacity();
   capacityRemaining += node.getRemaining();
-} else {
-  capacityTotal += node.getDfsUsed();
+  cacheCapacity += node.getCacheCapacity();
+  cacheUsed += node.getCacheUsed();
+} else if (!node.isDecommissioned()) {
+  cacheCapacity += node.getCacheCapacity();
+  cacheUsed += node.getCacheUsed();
 }
-cacheCapacity += node.getCacheCapacity();
-cacheUsed += node.getCacheUsed();
Set<StorageType> storageTypes = new HashSet<>();
 for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
   statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ class DatanodeStats {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-capacityUsed -= node.getDfsUsed();
-blockPoolUsed -= node.getBlockPoolUsed();
 xceiverCount -= node.getXceiverCount();
 if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+  capacityUsed -= node.getDfsUsed();
+  blockPoolUsed -= node.getBlockPoolUsed();
   nodesInService--;
   nodesInServiceXceiverCount -= node.getXceiverCount();
   capacityTotal -= node.getCapacity();
   capacityRemaining -= node.getRemaining();
-} else {
-  capacityTotal -= node.getDfsUsed();
+  cacheCapacity -= node.getCacheCapacity();
+  cacheUsed -= node.getCacheUsed();
+} else if (!node.isDecommissioned()) {
+  cacheCapacity -= node.getCacheCapacity();
+  cacheUsed -= node.getCacheUsed();
 }
-cacheCapacity -= node.getCacheCapacity();
-cacheUsed -= node.getCacheUsed();
Set<StorageType> storageTypes = new HashSet<>();
 for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
   statsMap.subtractStorage(storageInfo, node);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apa
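
The add()/subtract() changes above move dfsUsed and blockPoolUsed inside the in-service branch and keep cache statistics only while a node is not yet fully decommissioned, so a decommissioning or decommissioned node no longer inflates configured/used capacity. A compact, self-contained sketch of the resulting per-state accounting (hypothetical simplified types; xceiver counts and per-storage-type stats omitted):

/** Sketch of the per-admin-state accounting after this change (hypothetical simplified types). */
public class DatanodeStatsSketch {

  enum AdminState { IN_SERVICE, DECOMMISSION_INPROGRESS, DECOMMISSIONED }

  static class Dn {
    AdminState state = AdminState.IN_SERVICE;
    long capacity, remaining, dfsUsed, blockPoolUsed, cacheCapacity, cacheUsed;
  }

  long capacityTotal, capacityRemaining, capacityUsed, blockPoolUsed;
  long cacheCapacity, cacheUsed;
  int nodesInService;

  void add(Dn node) {
    if (node.state == AdminState.IN_SERVICE) {
      // Only in-service nodes contribute to configured and used capacity.
      nodesInService++;
      capacityTotal += node.capacity;
      capacityRemaining += node.remaining;
      capacityUsed += node.dfsUsed;
      blockPoolUsed += node.blockPoolUsed;
      cacheCapacity += node.cacheCapacity;
      cacheUsed += node.cacheUsed;
    } else if (node.state == AdminState.DECOMMISSION_INPROGRESS) {
      // A decommissioning node still serves cached reads, but its disk space
      // no longer counts toward configured or used capacity.
      cacheCapacity += node.cacheCapacity;
      cacheUsed += node.cacheUsed;
    }
    // A fully decommissioned node contributes nothing here; subtract() mirrors this.
  }

  public static void main(String[] args) {
    DatanodeStatsSketch stats = new DatanodeStatsSketch();
    Dn live = new Dn();
    live.capacity = 100; live.dfsUsed = 40;
    Dn draining = new Dn();
    draining.state = AdminState.DECOMMISSION_INPROGRESS;
    draining.capacity = 100; draining.dfsUsed = 90;
    stats.add(live);
    stats.add(draining);
    // Only the in-service node is counted: 100 configured, 40 used.
    System.out.println(stats.capacityTotal + " configured, " + stats.capacityUsed + " used");
  }
}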

hadoop git commit: HDFS-9279. Decomissioned capacity should not be considered for configured/used capacity. Contributed by Kuhu Shukla. (cherry picked from commit 19a77f546657b086af8f41fa631099bdde7e

2015-10-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c7f87dc2d -> 75bc53a86


HDFS-9279. Decomissioned capacity should not be considered for configured/used 
capacity. Contributed by Kuhu Shukla.
(cherry picked from commit 19a77f546657b086af8f41fa631099bdde7e010c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75bc53a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75bc53a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75bc53a8

Branch: refs/heads/branch-2
Commit: 75bc53a86a846b3c528164105b91604a9da9c543
Parents: c7f87dc
Author: Kihwal Lee 
Authored: Wed Oct 28 11:59:36 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 28 11:59:36 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/DatanodeStats.java   | 26 ++-
 .../apache/hadoop/hdfs/TestDecommission.java| 47 +---
 3 files changed, 58 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75bc53a8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bfba5d4..cdb10f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1344,6 +1344,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9302. WebHDFS throws NullPointerException if newLength is not
 provided. (Jagadesh Kiran N via yliu)
 
+HDFS-9297. Decomissioned capacity should not be considered for 
+configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75bc53a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 3ab0d5c..4c39c41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-capacityUsed += node.getDfsUsed();
-blockPoolUsed += node.getBlockPoolUsed();
 xceiverCount += node.getXceiverCount();
 if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+  capacityUsed += node.getDfsUsed();
+  blockPoolUsed += node.getBlockPoolUsed();
   nodesInService++;
   nodesInServiceXceiverCount += node.getXceiverCount();
   capacityTotal += node.getCapacity();
   capacityRemaining += node.getRemaining();
-} else {
-  capacityTotal += node.getDfsUsed();
+  cacheCapacity += node.getCacheCapacity();
+  cacheUsed += node.getCacheUsed();
+} else if (!node.isDecommissioned()) {
+  cacheCapacity += node.getCacheCapacity();
+  cacheUsed += node.getCacheUsed();
 }
-cacheCapacity += node.getCacheCapacity();
-cacheUsed += node.getCacheUsed();
Set<StorageType> storageTypes = new HashSet<>();
 for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
   statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ class DatanodeStats {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-capacityUsed -= node.getDfsUsed();
-blockPoolUsed -= node.getBlockPoolUsed();
 xceiverCount -= node.getXceiverCount();
 if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+  capacityUsed -= node.getDfsUsed();
+  blockPoolUsed -= node.getBlockPoolUsed();
   nodesInService--;
   nodesInServiceXceiverCount -= node.getXceiverCount();
   capacityTotal -= node.getCapacity();
   capacityRemaining -= node.getRemaining();
-} else {
-  capacityTotal -= node.getDfsUsed();
+  cacheCapacity -= node.getCacheCapacity();
+  cacheUsed -= node.getCacheUsed();
+} else if (!node.isDecommissioned()) {
+  cacheCapacity -= node.getCacheCapacity();
+  cacheUsed -= node.getCacheUsed();
 }
-cacheCapacity -= node.getCacheCapacity();
-cacheUsed -= node.getCacheUsed();
Set<StorageType> storageTypes = new HashSet<>();
 for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
   statsMap.subtractStorage(storageInfo, node);

http://git-wip-us.apache.org/repos/asf/hadoo

hadoop git commit: HDFS-9083. Replication violates block placement policy. Contributed by Rushabh Shah.

2015-10-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 336be63da -> 13b256ed2


HDFS-9083. Replication violates block placement policy. Contributed by Rushabh 
Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13b256ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13b256ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13b256ed

Branch: refs/heads/branch-2.7
Commit: 13b256ed2219078653820b544313637ce80a7120
Parents: 336be63
Author: Kihwal Lee 
Authored: Wed Oct 28 14:49:41 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Oct 28 14:49:41 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../server/blockmanagement/BlockManager.java| 10 
 .../blockmanagement/TestBlockManager.java   | 24 
 3 files changed, 26 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b256ed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e63532d..bd6e9f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -62,6 +62,8 @@ Release 2.7.2 - UNRELEASED
 HDFS-9043. Doc updation for commands in HDFS Federation
 (J.Andreina via vinayakumab)
 
+HDFS-9083. Replication violates block placement policy (Rushabh Shah)
+
 HDFS-9106. Transfer failure during pipeline recovery causes permanent
 write failures (kihwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b256ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d770346..c360d4c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -224,9 +224,6 @@ public class BlockManager {
 
   final float blocksInvalidateWorkPct;
   final int blocksReplWorkMultiplier;
-
-  /** variable to enable check for enough racks */
-  final boolean shouldCheckForEnoughRacks;
   
   // whether or not to issue block encryption keys.
   final boolean encryptDataTransfer;
@@ -325,9 +322,6 @@ public class BlockManager {
 conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
-this.shouldCheckForEnoughRacks =
-conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
-? false : true;
 
 this.blocksInvalidateWorkPct = 
DFSUtil.getInvalidateWorkPctPerIteration(conf);
 this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
@@ -351,7 +345,6 @@ public class BlockManager {
 LOG.info("maxReplication = " + maxReplication);
 LOG.info("minReplication = " + minReplication);
 LOG.info("maxReplicationStreams  = " + maxReplicationStreams);
-LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
 LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
 LOG.info("encryptDataTransfer= " + encryptDataTransfer);
 LOG.info("maxNumBlocksToLog  = " + maxNumBlocksToLog);
@@ -3490,9 +3483,6 @@ public class BlockManager {
   }
 
   boolean blockHasEnoughRacks(Block b) {
-if (!this.shouldCheckForEnoughRacks) {
-  return true;
-}
 boolean enoughRacks = false;;
Collection<DatanodeDescriptor> corruptNodes =
   corruptReplicas.getNodes(b);
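
With shouldCheckForEnoughRacks gone, blockHasEnoughRacks always performs the real rack check instead of returning true whenever no topology script was configured (on a single-rack cluster the check still passes trivially). A rough, self-contained sketch of what such a check looks like, counting distinct racks among non-corrupt replicas (a simplified rule and hypothetical types, not the actual BlockManager condition):

import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Rough sketch of a "block has enough racks" check (not the actual BlockManager condition). */
public class RackCheckSketch {

  static class Replica {
    final String datanode;
    final String rack;
    Replica(String datanode, String rack) { this.datanode = datanode; this.rack = rack; }
  }

  // Simplified rule: when more than one replica is expected, require live,
  // non-corrupt replicas on at least two distinct racks.
  static boolean blockHasEnoughRacks(List<Replica> replicas,
      Collection<String> corruptDatanodes, int expectedReplicas) {
    Set<String> racks = new HashSet<>();
    for (Replica r : replicas) {
      if (!corruptDatanodes.contains(r.datanode)) {   // ignore corrupt replicas
        racks.add(r.rack);
      }
    }
    return expectedReplicas <= 1 || racks.size() >= 2;
  }

  public static void main(String[] args) {
    List<Replica> replicas = List.of(
        new Replica("dn1", "/rack1"), new Replica("dn2", "/rack1"), new Replica("dn3", "/rack2"));
    System.out.println(blockHasEnoughRacks(replicas, Set.of(), 3));   // true: two distinct racks
  }
}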

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13b256ed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index fba840e..e026a53 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/

hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk ce31b2273 -> 43539b5ff


HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43539b5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43539b5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43539b5f

Branch: refs/heads/trunk
Commit: 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f
Parents: ce31b22
Author: Kihwal Lee 
Authored: Fri Oct 30 09:27:21 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:29:13 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43539b5f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 38b9e55..f6a22a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,6 +2201,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43539b5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d9b8d60..f610574 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,6 +659,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -708,6 +709,17 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+// It has already gone through enough number of nodes.
+if (refreshCounter <= excludedNodes.size()) {
+  break;
+}
+  }
 }
   
 if (numOfReplicas>0) {
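
The comment in the hunk above states the reasoning: if live nodes disappear while chooseRandom is iterating, the stale node count never shrinks and the loop can spin forever. The new refreshCounter decrements once per iteration and, when it reaches zero, re-reads the available-node count and bails out if only already-excluded nodes remain. A small, self-contained sketch of that guard around a generic random-selection loop (hypothetical names, not the real BlockPlacementPolicyDefault):

import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;

/** Sketch of the refresh-counter guard for a choose-random loop (hypothetical names). */
public class ChooseRandomSketch {

  private final Random rand = new Random();

  /** Stand-in for clusterMap.countNumOfAvailableNodes(scope, excludedNodes). */
  private int countAvailable(List<String> liveNodes, Set<String> excluded) {
    int n = 0;
    for (String node : liveNodes) {
      if (!excluded.contains(node)) {
        n++;
      }
    }
    return n;
  }

  String chooseRandom(List<String> liveNodes, Set<String> excluded) {
    int numOfAvailableNodes = countAvailable(liveNodes, excluded);
    int refreshCounter = numOfAvailableNodes;
    while (numOfAvailableNodes > 0) {
      String candidate = liveNodes.get(rand.nextInt(liveNodes.size()));
      if (excluded.add(candidate)) {       // first time we try this node
        numOfAvailableNodes--;
        if (isGoodTarget(candidate)) {
          return candidate;
        }
      }
      // Refresh the node count periodically; if the cluster shrank underneath us
      // and nothing usable is left, give up instead of looping forever.
      if (--refreshCounter == 0) {
        refreshCounter = countAvailable(liveNodes, excluded);
        if (refreshCounter <= 0) {
          break;
        }
      }
    }
    return null;   // no suitable target found
  }

  private boolean isGoodTarget(String node) {
    return node.startsWith("good");        // placeholder acceptance test
  }

  public static void main(String[] args) {
    ChooseRandomSketch sketch = new ChooseRandomSketch();
    System.out.println(sketch.chooseRandom(List.of("good-1", "bad-1", "bad-2"), new HashSet<>()));
  }
}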



hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee. (cherry picked from commit 43539b5ff4ac0874a8a454dc93a2a782

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1c80473a4 -> 0c7fd2630


HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.
(cherry picked from commit 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c7fd263
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c7fd263
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c7fd263

Branch: refs/heads/branch-2
Commit: 0c7fd26302109273a07b6087a9b92c9dff080816
Parents: 1c80473
Author: Kihwal Lee 
Authored: Fri Oct 30 09:30:11 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:30:11 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c7fd263/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 52775a8..fffdc91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1364,6 +1364,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c7fd263/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d9b8d60..f610574 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,6 +659,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -708,6 +709,17 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+// It has already gone through enough number of nodes.
+if (refreshCounter <= excludedNodes.size()) {
+  break;
+}
+  }
 }
   
 if (numOfReplicas>0) {



hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee. (cherry picked from commit 43539b5ff4ac0874a8a454dc93a2a782

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c84ec0657 -> c250b21c2


HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.
(cherry picked from commit 43539b5ff4ac0874a8a454dc93a2a782b0e0ea8f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c250b21c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c250b21c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c250b21c

Branch: refs/heads/branch-2.7
Commit: c250b21c23945ce2c580186c224cc65ab2b501fc
Parents: c84ec06
Author: Kihwal Lee 
Authored: Fri Oct 30 09:39:49 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:39:49 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../blockmanagement/BlockPlacementPolicyDefault.java| 12 
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c250b21c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bd6e9f0..59a1717 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -89,6 +89,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c250b21c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 97ea782..086abca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -622,6 +622,7 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
   
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -675,6 +676,17 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (i == storages.length);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+refreshCounter = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+// It has already gone through enough number of nodes.
+if (refreshCounter <= excludedNodes.size()) {
+  break;
+}
+  }
 }
   
 if (numOfReplicas>0) {



hadoop git commit: fix CHANGES.txt

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 43539b5ff -> 3c0204a58


fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c0204a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c0204a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c0204a5

Branch: refs/heads/trunk
Commit: 3c0204a5866520e74917b26b6ac2061650a5bb6d
Parents: 43539b5
Author: Kihwal Lee 
Authored: Fri Oct 30 09:40:41 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:40:41 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0204a5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f6a22a1..c5846b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2201,9 +2201,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2276,6 +2273,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



hadoop git commit: fix CHANGES.txt

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0c7fd2630 -> 4c33f883c


fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c33f883
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c33f883
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c33f883

Branch: refs/heads/branch-2
Commit: 4c33f883c468ad17528041f3ffb00f5603fccc8f
Parents: 0c7fd26
Author: Kihwal Lee 
Authored: Fri Oct 30 09:41:45 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 09:41:45 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c33f883/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fffdc91..b6e246e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1364,9 +1364,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9332. Fix Precondition failures from NameNodeEditLogRoller while
 saving namespace. (wang)
 
-HDFS-4937. ReplicationMonitor can infinite-loop in
-BlockPlacementPolicyDefault#chooseRandom() (kihwal)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1439,6 +1436,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9317. Document fsck -blockId and -storagepolicy options in branch-2.7.
 (aajisaka)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in
+BlockPlacementPolicyDefault#chooseRandom() (kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES



hadoop git commit: MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when strategy is dynamic. Contributed by Kuhu Shukla.

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 18727c63d -> 2868ca032


MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when 
strategy is dynamic. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2868ca03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2868ca03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2868ca03

Branch: refs/heads/trunk
Commit: 2868ca0328d908056745223fb38d9a90fd2811ba
Parents: 18727c6
Author: Kihwal Lee 
Authored: Fri Oct 30 14:56:41 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 14:56:41 2015 -0500

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../tools/mapred/lib/DynamicInputChunk.java | 137 +++
 .../tools/mapred/lib/DynamicInputFormat.java|  31 +++--
 .../tools/mapred/lib/DynamicRecordReader.java   |  13 +-
 .../org/apache/hadoop/tools/StubContext.java|   4 +
 .../mapred/lib/TestDynamicInputFormat.java  |  33 -
 6 files changed, 83 insertions(+), 138 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2868ca03/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 32be987..e999659 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -674,6 +674,9 @@ Release 2.7.2 - UNRELEASED
 MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
 (Junping Du via jlowe)
 
+MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when
+strategy is dynamic (Kuhu Shukla via kihwal)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2868ca03/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 8482e7d..9bf8e47 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -20,14 +20,10 @@ package org.apache.hadoop.tools.mapred.lib;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.CopyListingFileStatus;
-import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -47,72 +43,28 @@ import java.io.IOException;
  */
 class DynamicInputChunk {
   private static Log LOG = LogFactory.getLog(DynamicInputChunk.class);
-
-  private static Configuration configuration;
-  private static Path chunkRootPath;
-  private static String chunkFilePrefix;
-  private static int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
-  private static FileSystem fs;
-
   private Path chunkFilePath;
private SequenceFileRecordReader<Text, CopyListingFileStatus> reader;
   private SequenceFile.Writer writer;
+  private DynamicInputChunkContext chunkContext;
 
-  private static void initializeChunkInvariants(Configuration config)
-  throws IOException {
-configuration = config;
-Path listingFilePath = new Path(getListingFilePath(configuration));
-chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
-fs = chunkRootPath.getFileSystem(configuration);
-chunkFilePrefix = listingFilePath.getName() + ".chunk.";
-  }
-
-  private static String getListingFilePath(Configuration configuration) {
-final String listingFileString = configuration.get(
-DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
-assert !listingFileString.equals("") : "Listing file not found.";
-return listingFileString;
-  }
-
-  private static boolean areInvariantsInitialized() {
-return chunkRootPath != null;
-  }
-
-  private DynamicInputChunk(String chunkId, Configuration configuration)
+  DynamicInputChunk(String chunkId, DynamicInputChunkContext chunkContext)

hadoop git commit: MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when strategy is dynamic. Contributed by Kuhu Shukla. (cherry picked from commit 2868ca0328d908056745223fb38d9a9

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a7d8895b6 -> 52819fe4e


MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when 
strategy is dynamic. Contributed by Kuhu Shukla.
(cherry picked from commit 2868ca0328d908056745223fb38d9a90fd2811ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52819fe4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52819fe4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52819fe4

Branch: refs/heads/branch-2
Commit: 52819fe4ef3b04761d6d365e7e52f0006c6840e4
Parents: a7d8895
Author: Kihwal Lee 
Authored: Fri Oct 30 14:57:57 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 14:57:57 2015 -0500

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../tools/mapred/lib/DynamicInputChunk.java | 137 +++
 .../tools/mapred/lib/DynamicInputFormat.java|  31 +++--
 .../tools/mapred/lib/DynamicRecordReader.java   |  13 +-
 .../org/apache/hadoop/tools/StubContext.java|   4 +
 .../mapred/lib/TestDynamicInputFormat.java  |  33 -
 6 files changed, 83 insertions(+), 138 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52819fe4/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index fe896f4..37215be 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -393,6 +393,9 @@ Release 2.7.2 - UNRELEASED
 MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
 (Junping Du via jlowe)
 
+MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when
+strategy is dynamic (Kuhu Shukla via kihwal)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52819fe4/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 8482e7d..9bf8e47 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -20,14 +20,10 @@ package org.apache.hadoop.tools.mapred.lib;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.CopyListingFileStatus;
-import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -47,72 +43,28 @@ import java.io.IOException;
  */
 class DynamicInputChunk {
   private static Log LOG = LogFactory.getLog(DynamicInputChunk.class);
-
-  private static Configuration configuration;
-  private static Path chunkRootPath;
-  private static String chunkFilePrefix;
-  private static int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
-  private static FileSystem fs;
-
   private Path chunkFilePath;
private SequenceFileRecordReader<Text, CopyListingFileStatus> reader;
   private SequenceFile.Writer writer;
+  private DynamicInputChunkContext chunkContext;
 
-  private static void initializeChunkInvariants(Configuration config)
-  throws IOException {
-configuration = config;
-Path listingFilePath = new Path(getListingFilePath(configuration));
-chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
-fs = chunkRootPath.getFileSystem(configuration);
-chunkFilePrefix = listingFilePath.getName() + ".chunk.";
-  }
-
-  private static String getListingFilePath(Configuration configuration) {
-final String listingFileString = configuration.get(
-DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
-assert !listingFileString.equals("") : "Listing file not found.";
-return listingFileString;
-  }
-
-  private static boolean areInvariantsInitialized() {
-return chunkRootPath != null;
-  }
-
-  private DynamicInputChunk(String chunkId, Configuration configuration)
+  DynamicInputChunk(

hadoop git commit: MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when strategy is dynamic. Contributed by Kuhu Shukla. (cherry picked from commit 2868ca0328d908056745223fb38d9a9

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 431f48f65 -> 7b783911c


MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when 
strategy is dynamic. Contributed by Kuhu Shukla.
(cherry picked from commit 2868ca0328d908056745223fb38d9a90fd2811ba)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b783911
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b783911
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b783911

Branch: refs/heads/branch-2.7
Commit: 7b783911c4062dcc957a1d577ffec56471510d23
Parents: 431f48f
Author: Kihwal Lee 
Authored: Fri Oct 30 14:58:54 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 14:58:54 2015 -0500

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../tools/mapred/lib/DynamicInputChunk.java | 137 +++
 .../tools/mapred/lib/DynamicInputFormat.java|  31 +++--
 .../tools/mapred/lib/DynamicRecordReader.java   |  13 +-
 .../org/apache/hadoop/tools/StubContext.java|   4 +
 .../mapred/lib/TestDynamicInputFormat.java  |  33 -
 6 files changed, 83 insertions(+), 138 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b783911/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9aac41a..7f61e4d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -55,6 +55,9 @@ Release 2.7.2 - UNRELEASED
 MAPREDUCE-6528. Memory leak for HistoryFileManager.getJobSummary()
 (Junping Du via jlowe)
 
+MAPREDUCE-6451. DistCp has incorrect chunkFilePath for multiple jobs when
+strategy is dynamic (Kuhu Shukla via kihwal)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b783911/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
index 8482e7d..9bf8e47 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java
@@ -20,14 +20,10 @@ package org.apache.hadoop.tools.mapred.lib;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tools.CopyListingFileStatus;
-import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -47,72 +43,28 @@ import java.io.IOException;
  */
 class DynamicInputChunk {
   private static Log LOG = LogFactory.getLog(DynamicInputChunk.class);
-
-  private static Configuration configuration;
-  private static Path chunkRootPath;
-  private static String chunkFilePrefix;
-  private static int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
-  private static FileSystem fs;
-
   private Path chunkFilePath;
private SequenceFileRecordReader<Text, CopyListingFileStatus> reader;
   private SequenceFile.Writer writer;
+  private DynamicInputChunkContext chunkContext;
 
-  private static void initializeChunkInvariants(Configuration config)
-  throws IOException {
-configuration = config;
-Path listingFilePath = new Path(getListingFilePath(configuration));
-chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
-fs = chunkRootPath.getFileSystem(configuration);
-chunkFilePrefix = listingFilePath.getName() + ".chunk.";
-  }
-
-  private static String getListingFilePath(Configuration configuration) {
-final String listingFileString = configuration.get(
-DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
-assert !listingFileString.equals("") : "Listing file not found.";
-return listingFileString;
-  }
-
-  private static boolean areInvariantsInitialized() {
-return chunkRootPath != null;
-  }
-
-  private DynamicInputChunk(String chunkId, Configuration configuration)
+  DynamicInputChunk(

hadoop git commit: Addendum to MAPREDUCE-6451

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2868ca032 -> b24fe0648


Addendum to MAPREDUCE-6451


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b24fe064
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b24fe064
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b24fe064

Branch: refs/heads/trunk
Commit: b24fe0648348d325d14931f80cee8a170fb3358a
Parents: 2868ca0
Author: Kihwal Lee 
Authored: Fri Oct 30 16:05:23 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 16:05:23 2015 -0500

--
 .../mapred/lib/DynamicInputChunkContext.java| 113 +++
 1 file changed, 113 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b24fe064/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
new file mode 100644
index 000..043ff1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools.mapred.lib;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.tools.DistCpConstants;
+
+import java.io.IOException;
+
+/**
+ * Class to initialize the DynamicInputChunk invariants.
+ */
+class DynamicInputChunkContext {
+
+  private static Log LOG = LogFactory.getLog(DynamicInputChunkContext.class);
+  private Configuration configuration;
+  private Path chunkRootPath = null;
+  private String chunkFilePrefix;
+  private FileSystem fs;
+  private int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
+
+  public DynamicInputChunkContext(Configuration config)
+  throws IOException {
+this.configuration = config;
+Path listingFilePath = new Path(getListingFilePath(configuration));
+chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
+fs = chunkRootPath.getFileSystem(configuration);
+chunkFilePrefix = listingFilePath.getName() + ".chunk.";
+  }
+
+  public Configuration getConfiguration() {
+return configuration;
+  }
+
+  public Path getChunkRootPath() {
+return chunkRootPath;
+  }
+
+  public String getChunkFilePrefix() {
+return chunkFilePrefix;
+  }
+
+  public FileSystem getFs() {
+return fs;
+  }
+
+  private static String getListingFilePath(Configuration configuration) {
+final String listingFileString = configuration.get(
+DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
+assert !listingFileString.equals("") : "Listing file not found.";
+return listingFileString;
+  }
+
+  public int getNumChunksLeft() {
+return numChunksLeft;
+  }
+
+  public DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
+  throws IOException, InterruptedException {
+
+String taskId
+= taskAttemptContext.getTaskAttemptID().getTaskID().toString();
+Path acquiredFilePath = new Path(getChunkRootPath(), taskId);
+
+if (fs.exists(acquiredFilePath)) {
+  LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
+  return new DynamicInputChunk(acquiredFilePath, taskAttemptContext, this);
+}
+
+for (FileStatus chunkFile : getListOfChunkFiles()) {
+  if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
+LOG.info(taskId + " acquired " + chunkFile.getPath());
+return new DynamicInputChu
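
The DynamicInputChunkContext added above holds what used to be static fields on DynamicInputChunk (configuration, chunkRootPath, chunkFilePrefix, fs), so each DistCp job gets its own chunk paths instead of every job in the JVM sharing whichever listing file initialized the statics first. A generic sketch of that refactoring, static invariants replaced by a per-job context object (hypothetical names and paths, not the DistCp classes):

import java.util.ArrayList;
import java.util.List;

/** Sketch: per-job context object instead of static "invariants" shared across jobs. */
public class ChunkContextSketch {

  /** Each job builds its own context from its own listing file path. */
  static class JobChunkContext {
    final String chunkRootPath;
    final String chunkFilePrefix;

    JobChunkContext(String listingFilePath) {
      this.chunkRootPath = listingFilePath + ".chunkDir";
      this.chunkFilePrefix = listingFilePath + ".chunk.";
    }
  }

  /** A chunk now carries the context it was created under, instead of reading static fields. */
  static class Chunk {
    final JobChunkContext context;
    final String chunkId;

    Chunk(String chunkId, JobChunkContext context) {
      this.chunkId = chunkId;
      this.context = context;
    }

    String path() {
      return context.chunkRootPath + "/" + context.chunkFilePrefix + chunkId;
    }
  }

  public static void main(String[] args) {
    // Two concurrent jobs with different (hypothetical) listing files.
    JobChunkContext jobA = new JobChunkContext("/tmp/distcp-A/fileList.seq");
    JobChunkContext jobB = new JobChunkContext("/tmp/distcp-B/fileList.seq");

    List<Chunk> chunks = new ArrayList<>();
    chunks.add(new Chunk("00001", jobA));
    chunks.add(new Chunk("00001", jobB));
    for (Chunk c : chunks) {
      System.out.println(c.path());   // distinct paths; with static fields they would collide
    }
  }
}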

hadoop git commit: Addendum to MAPREDUCE-6451 (cherry picked from commit b24fe0648348d325d14931f80cee8a170fb3358a)

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 52819fe4e -> dae9f0ef5


Addendum to MAPREDUCE-6451
(cherry picked from commit b24fe0648348d325d14931f80cee8a170fb3358a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dae9f0ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dae9f0ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dae9f0ef

Branch: refs/heads/branch-2
Commit: dae9f0ef5341c3939c33c05ca1ebfd0a7c8e773d
Parents: 52819fe
Author: Kihwal Lee 
Authored: Fri Oct 30 16:06:50 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 16:06:50 2015 -0500

--
 .../mapred/lib/DynamicInputChunkContext.java| 113 +++
 1 file changed, 113 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dae9f0ef/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
new file mode 100644
index 000..043ff1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools.mapred.lib;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.tools.DistCpConstants;
+
+import java.io.IOException;
+
+/**
+ * Class to initialize the DynamicInputChunk invariants.
+ */
+class DynamicInputChunkContext {
+
+  private static Log LOG = LogFactory.getLog(DynamicInputChunkContext.class);
+  private Configuration configuration;
+  private Path chunkRootPath = null;
+  private String chunkFilePrefix;
+  private FileSystem fs;
+  private int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
+
+  public DynamicInputChunkContext(Configuration config)
+  throws IOException {
+this.configuration = config;
+Path listingFilePath = new Path(getListingFilePath(configuration));
+chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
+fs = chunkRootPath.getFileSystem(configuration);
+chunkFilePrefix = listingFilePath.getName() + ".chunk.";
+  }
+
+  public Configuration getConfiguration() {
+return configuration;
+  }
+
+  public Path getChunkRootPath() {
+return chunkRootPath;
+  }
+
+  public String getChunkFilePrefix() {
+return chunkFilePrefix;
+  }
+
+  public FileSystem getFs() {
+return fs;
+  }
+
+  private static String getListingFilePath(Configuration configuration) {
+final String listingFileString = configuration.get(
+DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
+assert !listingFileString.equals("") : "Listing file not found.";
+return listingFileString;
+  }
+
+  public int getNumChunksLeft() {
+return numChunksLeft;
+  }
+
+  public DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
+  throws IOException, InterruptedException {
+
+String taskId
+= taskAttemptContext.getTaskAttemptID().getTaskID().toString();
+Path acquiredFilePath = new Path(getChunkRootPath(), taskId);
+
+if (fs.exists(acquiredFilePath)) {
+  LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
+  return new DynamicInputChunk(acquiredFilePath, taskAttemptContext, this);
+}
+
+for (FileStatus chunkFile : getListOfChunkFiles()) {
+  if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
+LOG.info(taskId + " a

hadoop git commit: Addendum to MAPREDUCE-6451 (cherry picked from commit b24fe0648348d325d14931f80cee8a170fb3358a)

2015-10-30 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7b783911c -> 63cd0c8ba


Addendum to MAPREDUCE-6451
(cherry picked from commit b24fe0648348d325d14931f80cee8a170fb3358a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63cd0c8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63cd0c8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63cd0c8b

Branch: refs/heads/branch-2.7
Commit: 63cd0c8ba143450853d675546b196d65452ebef6
Parents: 7b78391
Author: Kihwal Lee 
Authored: Fri Oct 30 16:07:49 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 30 16:07:49 2015 -0500

--
 .../mapred/lib/DynamicInputChunkContext.java| 113 +++
 1 file changed, 113 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63cd0c8b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
new file mode 100644
index 000..043ff1c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunkContext.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools.mapred.lib;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.tools.DistCpConstants;
+
+import java.io.IOException;
+
+/**
+ * Class to initialize the DynamicInputChunk invariants.
+ */
+class DynamicInputChunkContext {
+
+  private static Log LOG = LogFactory.getLog(DynamicInputChunkContext.class);
+  private Configuration configuration;
+  private Path chunkRootPath = null;
+  private String chunkFilePrefix;
+  private FileSystem fs;
+  private int numChunksLeft = -1; // Un-initialized before 1st dir-scan.
+
+  public DynamicInputChunkContext(Configuration config)
+  throws IOException {
+this.configuration = config;
+Path listingFilePath = new Path(getListingFilePath(configuration));
+chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
+fs = chunkRootPath.getFileSystem(configuration);
+chunkFilePrefix = listingFilePath.getName() + ".chunk.";
+  }
+
+  public Configuration getConfiguration() {
+return configuration;
+  }
+
+  public Path getChunkRootPath() {
+return chunkRootPath;
+  }
+
+  public String getChunkFilePrefix() {
+return chunkFilePrefix;
+  }
+
+  public FileSystem getFs() {
+return fs;
+  }
+
+  private static String getListingFilePath(Configuration configuration) {
+final String listingFileString = configuration.get(
+DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, "");
+assert !listingFileString.equals("") : "Listing file not found.";
+return listingFileString;
+  }
+
+  public int getNumChunksLeft() {
+return numChunksLeft;
+  }
+
+  public DynamicInputChunk acquire(TaskAttemptContext taskAttemptContext)
+  throws IOException, InterruptedException {
+
+String taskId
+= taskAttemptContext.getTaskAttemptID().getTaskID().toString();
+Path acquiredFilePath = new Path(getChunkRootPath(), taskId);
+
+if (fs.exists(acquiredFilePath)) {
+  LOG.info("Acquiring pre-assigned chunk: " + acquiredFilePath);
+  return new DynamicInputChunk(acquiredFilePath, taskAttemptContext, this);
+}
+
+for (FileStatus chunkFile : getListOfChunkFiles()) {
+  if (fs.rename(chunkFile.getPath(), acquiredFilePath)) {
+LOG.info(
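
The acquire() method shown above leans on FileSystem.rename() being atomic: each task first looks for a chunk already renamed to its own task id, and otherwise races the other tasks to rename a free chunk file to that name; whichever rename returns true owns the chunk. Below is a minimal, self-contained sketch of the same claim-by-rename idea. The class name, the "chunk." prefix and the directory layout are illustrative only, not the DistCp code.

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative sketch: claim one work file by renaming it to a per-worker name. */
public class RenameClaimSketch {

  public static Path claim(FileSystem fs, Path workDir, String workerId)
      throws IOException {
    Path claimed = new Path(workDir, workerId);
    if (fs.exists(claimed)) {
      return claimed;                       // a previous attempt already claimed work
    }
    for (FileStatus candidate : fs.listStatus(workDir)) {
      String name = candidate.getPath().getName();
      if (!name.startsWith("chunk.")) {
        continue;                           // skip files other workers already claimed
      }
      if (fs.rename(candidate.getPath(), claimed)) {
        return claimed;                     // rename succeeded, this worker owns the file
      }
      // rename returned false: another worker won the race, try the next candidate
    }
    return null;                            // nothing left to claim
  }
}

A re-launched task attempt benefits from the exists() check in the same way DynamicInputChunkContext does: the chunk renamed by the failed attempt is still sitting under the task id, so the new attempt finds it again instead of grabbing a fresh one.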

hadoop git commit: HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling values() since it creates a temporary array. (Staffan Friberg via yliu) (cherry picked from commit 0ff12161

2015-11-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e1798e8f2 -> 2c51bbda5


HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling 
values() since it creates a temporary array. (Staffan Friberg via yliu)
(cherry picked from commit 0ff1216100d16cfa862854a89cd1be8969b0bd7e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c51bbda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c51bbda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c51bbda

Branch: refs/heads/branch-2.7
Commit: 2c51bbda510577506b3711c28b422b633ac59e5e
Parents: e1798e8
Author: Kihwal Lee 
Authored: Mon Nov 2 09:07:16 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 2 09:07:16 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../apache/hadoop/hdfs/server/common/HdfsServerConstants.java  | 6 --
 2 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c51bbda/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8cf1bd9..314caa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -16,6 +16,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8099. Change "DFSInputStream has been closed already" message to
 debug log level (Charles Lamb via Colin P. McCabe)
 
+HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c51bbda/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 8af3af7..e5bcecf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -250,6 +250,8 @@ public final class HdfsServerConstants {
 /** Temporary replica: created for replication and relocation only. */
 TEMPORARY(4);
 
+private static final ReplicaState[] cachedValues = ReplicaState.values();
+
 private final int value;
 
 private ReplicaState(int v) {
@@ -261,12 +263,12 @@ public final class HdfsServerConstants {
 }
 
 public static ReplicaState getState(int v) {
-  return ReplicaState.values()[v];
+  return cachedValues[v];
 }
 
 /** Read from in */
 public static ReplicaState read(DataInput in) throws IOException {
-  return values()[in.readByte()];
+  return cachedValues[in.readByte()];
 }
 
 /** Write to out */
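
The change above is a small but common optimization: Enum.values() allocates and returns a fresh copy of the constant array on every call, so code that indexes into it on a hot path should cache the array once. A generic sketch of the same idea, independent of the HDFS enum:

/** Illustrative sketch: cache values() so ordinal lookups allocate nothing. */
public enum State {
  NORMAL, DEGRADED, FAILED;

  // values() clones its backing array on every call; compute the copy once instead.
  private static final State[] CACHED_VALUES = values();

  public static State fromOrdinal(int ordinal) {
    return CACHED_VALUES[ordinal];          // same result as values()[ordinal], no copy
  }
}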



hadoop git commit: Fix CHANGES.txt

2015-11-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e4f8a46c -> 2529464f0


Fix CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2529464f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2529464f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2529464f

Branch: refs/heads/trunk
Commit: 2529464f0841732792343d515cd1be1dccb3c453
Parents: 6e4f8a4
Author: Kihwal Lee 
Authored: Mon Nov 2 09:09:33 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 2 09:09:33 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2529464f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8e6634a..0bbc60d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1529,9 +1529,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
 better efficiency. (Charlie Helin via wang)
 
-HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
-values() since it creates a temporary array. (Staffan Friberg via yliu)
-
 HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
 BlockManager#excessReplicateMap. (yliu)
 
@@ -2220,6 +2217,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8099. Change "DFSInputStream has been closed already" message to
 debug log level (Charles Lamb via Colin P. McCabe)
 
+HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)



hadoop git commit: Fix CHANGES.txt (cherry picked from commit 2529464f0841732792343d515cd1be1dccb3c453)

2015-11-02 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bb2ee9f8e -> d65ca63c6


Fix CHANGES.txt
(cherry picked from commit 2529464f0841732792343d515cd1be1dccb3c453)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d65ca63c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d65ca63c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d65ca63c

Branch: refs/heads/branch-2
Commit: d65ca63c6869a17729df0236443d00577863cbe1
Parents: bb2ee9f
Author: Kihwal Lee 
Authored: Mon Nov 2 09:10:55 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 2 09:10:55 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d65ca63c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d879c93..8c97015 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -688,9 +688,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9110. Use Files.walkFileTree in NNUpgradeUtil#doPreUpgrade for
 better efficiency. (Charlie Helin via wang)
 
-HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
-values() since it creates a temporary array. (Staffan Friberg via yliu)
-
 HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
 BlockManager#excessReplicateMap. (yliu)
 
@@ -1383,6 +1380,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-8099. Change "DFSInputStream has been closed already" message to
 debug log level (Charles Lamb via Colin P. McCabe)
 
+HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
+values() since it creates a temporary array. (Staffan Friberg via yliu)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)



hadoop git commit: fix up CHANGES.txt

2015-11-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0eed886a1 -> 3fb1ece4e


fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb1ece4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb1ece4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb1ece4

Branch: refs/heads/trunk
Commit: 3fb1ece4e9b290ad4a0b6357a519b20f59561911
Parents: 0eed886
Author: Kihwal Lee 
Authored: Wed Nov 4 12:14:45 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:15:10 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb1ece4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bdcc1fc..500dc92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2225,9 +2225,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
-HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
-commitBlock. (Chang Li via zhz)
-
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
@@ -2246,6 +2243,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: fix up CHANGES.txt

2015-11-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b61aa716e -> c8ffea3db


fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8ffea3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8ffea3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8ffea3d

Branch: refs/heads/branch-2
Commit: c8ffea3db77125b36df93c4970f6349049bb2673
Parents: b61aa71
Author: Kihwal Lee 
Authored: Wed Nov 4 12:14:45 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:15:58 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ffea3d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 527a8ba..b7f24ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1382,9 +1382,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-9362. TestAuditLogger#testAuditLoggerWithCallContext assumes Unix line
 endings, fails on Windows. (cnauroth)
 
-HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
-commitBlock. (Chang Li via zhz)
-
 HDFS-9351. checkNNStartup() need to be called when fsck calls
 FSNamesystem.getSnapshottableDirs(). (Xiao Chen via Yongjun Zhang)
 
@@ -1403,6 +1400,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via zhz)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in commitBlock. Contributed by Chang Li.

2015-11-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 abfc710b0 -> 397b554c3


HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in 
commitBlock. Contributed by Chang Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/397b554c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/397b554c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/397b554c

Branch: refs/heads/branch-2.7
Commit: 397b554c36867724ca4167931270cd7af784e54a
Parents: abfc710
Author: Kihwal Lee 
Authored: Wed Nov 4 12:10:59 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 4 12:10:59 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |   2 +-
 .../BlockInfoContiguousUnderConstruction.java   |   2 +-
 .../server/blockmanagement/BlockManager.java|   4 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  69 +
 .../TestCommitBlockWithInvalidGenStamp.java | 100 +++
 6 files changed, 178 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bd92181..45ce310 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.7.3 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
+commitBlock. (Chang Li via kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index def829c..3c8b2d3 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -226,7 +226,7 @@ public class DFSOutputStream extends FSOutputSummer
   //
   class DataStreamer extends Daemon {
 private volatile boolean streamerClosed = false;
-private ExtendedBlock block; // its length is number of bytes acked
+private volatile ExtendedBlock block; // its length is number of bytes acked
 private Token accessToken;
 private DataOutputStream blockStream;
 private DataInputStream blockReplyStream;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 92153ab..4f315c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -274,7 +274,7 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
   throw new IOException("Trying to commit inconsistent block: id = "
   + block.getBlockId() + ", expected id = " + getBlockId());
 blockUCState = BlockUCState.COMMITTED;
-this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp());
+this.setNumBytes(block.getNumBytes());
 // Sort out invalid replicas.
 setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/397b554c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c360d4c..63a7aed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanag
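
Two things happen in this patch: DataStreamer's block field becomes volatile, and commitBlock() stops overwriting the stored block id and generation stamp wholesale, instead updating only the length and then verifying the generation stamp. The volatile half is the textbook fix for a field written by one thread and read by another without a shared lock; a minimal illustration of that visibility guarantee, with nothing HDFS-specific in it:

/** Illustrative sketch: volatile makes a write by one thread visible to later reads by another. */
public class VisibilitySketch {

  private volatile String latest;           // without volatile, a reader may never see the update

  public void publish(String value) {
    latest = value;                         // writer thread
  }

  public String read() {
    return latest;                          // reader thread observes the most recent publish()
  }
}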

hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee. (cherry picked from commit ff47f35deed14ba6463cba76f0e6a6c1

2015-11-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 397b554c3 -> 616ed9084


HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.
(cherry picked from commit ff47f35deed14ba6463cba76f0e6a6c15abb3eca)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/616ed908
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/616ed908
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/616ed908

Branch: refs/heads/branch-2.7
Commit: 616ed9084be4dc337c2ececa7aecc4ab7899a75a
Parents: 397b554
Author: Kihwal Lee 
Authored: Thu Nov 5 09:27:36 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Nov 5 09:27:36 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../server/blockmanagement/BlockPlacementPolicyDefault.java | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/616ed908/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 45ce310..3e62c5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -15,6 +15,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
 commitBlock. (Chang Li via kihwal)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in 
+BlockPlacementPolicyDefault#chooseRandom(). (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/616ed908/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 97ea782..93056e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -622,6 +622,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -675,6 +676,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (i == storages.length);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+refreshCounter = numOfAvailableNodes;
+  }
 }
   
 if (numOfReplicas>0) {



hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.

2015-11-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk c56340179 -> ff47f35de


HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff47f35d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff47f35d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff47f35d

Branch: refs/heads/trunk
Commit: ff47f35deed14ba6463cba76f0e6a6c15abb3eca
Parents: c563401
Author: Kihwal Lee 
Authored: Thu Nov 5 09:25:20 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Nov 5 09:25:20 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../server/blockmanagement/BlockPlacementPolicyDefault.java | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff47f35d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 910753a..f5c6f0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2256,6 +2256,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
 commitBlock. (Chang Li via zhz)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in 
+BlockPlacementPolicyDefault#chooseRandom(). (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff47f35d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 56ebc35..d94179b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,6 +659,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -708,6 +709,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+refreshCounter = numOfAvailableNodes;
+  }
 }
   
 if (numOfReplicas>0) {



hadoop git commit: HDFS-4937. ReplicationMonitor can infinite-loop in BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee. (cherry picked from commit ff47f35deed14ba6463cba76f0e6a6c1

2015-11-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6edd41ac6 -> 37abd0e33


HDFS-4937. ReplicationMonitor can infinite-loop in 
BlockPlacementPolicyDefault#chooseRandom(). Contributed by Kihwal Lee.
(cherry picked from commit ff47f35deed14ba6463cba76f0e6a6c15abb3eca)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37abd0e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37abd0e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37abd0e3

Branch: refs/heads/branch-2
Commit: 37abd0e33bedaf19cde73bdbbc31af304cf486a2
Parents: 6edd41a
Author: Kihwal Lee 
Authored: Thu Nov 5 09:26:53 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Nov 5 09:26:53 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../server/blockmanagement/BlockPlacementPolicyDefault.java | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37abd0e3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d259e50..d3073a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1410,6 +1410,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9289. Make DataStreamer#block thread safe and verify genStamp in
 commitBlock. (Chang Li via zhz)
 
+HDFS-4937. ReplicationMonitor can infinite-loop in 
+BlockPlacementPolicyDefault#chooseRandom(). (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37abd0e3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 56ebc35..d94179b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -659,6 +659,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
 int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
 scope, excludedNodes);
+int refreshCounter = numOfAvailableNodes;
 StringBuilder builder = null;
 if (LOG.isDebugEnabled()) {
   builder = debugLoggingBuilder.get();
@@ -708,6 +709,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 // If no candidate storage was found on this DN then set badTarget.
 badTarget = (storage == null);
   }
+  // Refresh the node count. If the live node count became smaller,
+  // but it is not reflected in this loop, it may loop forever in case
+  // the replicas/rack cannot be satisfied.
+  if (--refreshCounter == 0) {
+numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(scope,
+excludedNodes);
+refreshCounter = numOfAvailableNodes;
+  }
 }
   
 if (numOfReplicas>0) {
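
The comment in the hunk explains the failure mode: chooseRandom() loops while numOfAvailableNodes is positive, but that count was taken once up front, so if live nodes disappear mid-loop the stale count can keep the ReplicationMonitor spinning forever. The added refreshCounter re-derives the count after one generation of attempts. The same guard reduced to its essentials, with all names invented for the example:

import java.util.List;
import java.util.Random;

/** Illustrative sketch: periodically re-count the candidate pool so a stale bound cannot cause an endless loop. */
public class BoundedRetrySketch {

  private final Random random = new Random();

  /** Returns how many replicas could be placed before the pool ran dry. */
  public int place(List<String> livePool, int replicasNeeded) {
    int available = livePool.size();
    int refreshCounter = available;
    int placed = 0;

    while (replicasNeeded > 0 && available > 0) {
      if (!livePool.isEmpty()) {
        livePool.remove(random.nextInt(livePool.size()));  // a chosen node is excluded from further picks
        available--;
        replicasNeeded--;
        placed++;
      }
      // Safety valve, the HDFS-4937 idea: after a full pass worth of attempts,
      // re-count the pool so a shrinking cluster cannot leave the loop running
      // against a count that is now too large.
      if (--refreshCounter == 0) {
        available = livePool.size();
        refreshCounter = Math.max(available, 1);
      }
    }
    return placed;
  }
}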



hadoop git commit: HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla.

2015-11-06 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk b64242c0d -> bf6aa30a1


HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf6aa30a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf6aa30a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf6aa30a

Branch: refs/heads/trunk
Commit: bf6aa30a156b3c5cac5469014a5989e0dfdc7256
Parents: b64242c
Author: Kihwal Lee 
Authored: Fri Nov 6 13:30:33 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Nov 6 13:30:33 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../BlockPlacementPolicyDefault.java|  7 ++-
 .../src/main/resources/hdfs-default.xml |  9 
 .../TestReplicationPolicyConsiderLoad.java  | 56 
 5 files changed, 77 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf6aa30a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a512da5..f12a2a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1683,6 +1683,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-9282. Make data directory count and storage raw capacity related tests
 FsDataset-agnostic. (Tony Wu via lei)
 
+HDFS-9318. considerLoad factor can be improved. (Kuhu Shukla via kihwal)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf6aa30a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c14ce20..54e0d10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -184,6 +184,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY;
   public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = 
true;
+  public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR =
+  "dfs.namenode.replication.considerLoad.factor";
+  public static final double
+  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
   public static final String  DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
   public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf6aa30a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d94179b..13b17e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -58,6 +58,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   };
 
   protected boolean considerLoad; 
+  protected double considerLoadFactor;
   private boolean preferLocalNode = true;
   protected NetworkTopology clusterMap;
   protected Host2NodesMap host2datanodeMap;
@@ -79,6 +80,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+this.considerLoadFactor = conf.getDouble(
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
 this.stats = stats;
 this.clusterMap = clusterMap;
 this.hos

hadoop git commit: HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla. (cherry picked from commit bf6aa30a156b3c5cac5469014a5989e0dfdc7256)

2015-11-06 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 41d0d9a32 -> 481e7248d


HDFS-9318. considerLoad factor can be improved. Contributed by Kuhu Shukla.
(cherry picked from commit bf6aa30a156b3c5cac5469014a5989e0dfdc7256)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481e7248
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481e7248
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481e7248

Branch: refs/heads/branch-2
Commit: 481e7248dee5b0a3e0f3148c3cdde133a637b990
Parents: 41d0d9a
Author: Kihwal Lee 
Authored: Fri Nov 6 14:08:10 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Nov 6 14:08:10 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../BlockPlacementPolicyDefault.java|  7 ++-
 .../src/main/resources/hdfs-default.xml |  9 
 .../TestReplicationPolicyConsiderLoad.java  | 56 
 5 files changed, 77 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1c8e840..4539cd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -836,6 +836,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-9282. Make data directory count and storage raw capacity related tests
 FsDataset-agnostic. (Tony Wu via lei)
 
+HDFS-9318. considerLoad factor can be improved. (Kuhu Shukla via kihwal)
+
   BUG FIXES
 
 HDFS-8091: ACLStatus and XAttributes should be presented to

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1e6143c..e73aa2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -179,6 +179,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY;
   public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = 
true;
+  public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR =
+  "dfs.namenode.replication.considerLoad.factor";
+  public static final double
+  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
   public static final String  DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
   public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481e7248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d94179b..13b17e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -58,6 +58,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   };
 
   protected boolean considerLoad; 
+  protected double considerLoadFactor;
   private boolean preferLocalNode = true;
   protected NetworkTopology clusterMap;
   protected Host2NodesMap host2datanodeMap;
@@ -79,6 +80,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
  Host2NodesMap host2datanodeMap) {
 this.considerLoad = conf.getBoolean(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
+this.considerLoadFactor = conf.getDouble(
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
+DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
 this.
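
The new key, dfs.namenode.replication.considerLoad.factor, turns what used to be a hard-coded multiplier into configuration, with 2.0 as the default. The load comparison itself falls in the part of the diff trimmed above, so the following is a hedged sketch of how such a factor is typically applied (a node treated as overloaded when its transfer-thread count exceeds factor times the cluster average), not a copy of the HDFS code:

import org.apache.hadoop.conf.Configuration;

/** Hedged sketch: reject a node whose load exceeds factor * average load. */
public class LoadCheckSketch {

  // Key and default mirror the constants added in the diff above.
  static final String FACTOR_KEY = "dfs.namenode.replication.considerLoad.factor";
  static final double FACTOR_DEFAULT = 2.0;

  private final double considerLoadFactor;

  public LoadCheckSketch(Configuration conf) {
    this.considerLoadFactor = conf.getDouble(FACTOR_KEY, FACTOR_DEFAULT);
  }

  /** True if the node is too busy relative to the cluster average (assumed semantics). */
  public boolean overloaded(int nodeXceiverCount, double averageXceiverCount) {
    return nodeXceiverCount > considerLoadFactor * averageXceiverCount;
  }
}

Under that reading, raising the factor keeps more heavily loaded DataNodes eligible as write targets, while lowering it spreads writes more aggressively at the cost of rejecting more candidates.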

hadoop git commit: HDFS-9383. TestByteArrayManager#testByteArrayManager fails. Contributed by Tsz Wo Nicholas Sze.

2015-11-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 668e89753 -> ef926b2e3


HDFS-9383. TestByteArrayManager#testByteArrayManager fails. Contributed by Tsz 
Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef926b2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef926b2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef926b2e

Branch: refs/heads/trunk
Commit: ef926b2e3824475581454c1e17a0d7c94529efde
Parents: 668e897
Author: Kihwal Lee 
Authored: Mon Nov 9 08:56:04 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 9 08:56:04 2015 -0600

--
 .../java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java| 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef926b2e/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
index f5dd883..a8d5cef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
@@ -308,7 +308,7 @@ public class TestByteArrayManager {
   public void testByteArrayManager() throws Exception {
 final int countThreshold = 32;
 final int countLimit = 64;
-final long countResetTimePeriodMs = 1000L;
+final long countResetTimePeriodMs = 1L;
 final ByteArrayManager.Impl bam = new ByteArrayManager.Impl(
 new ByteArrayManager.Conf(
 countThreshold, countLimit, countResetTimePeriodMs));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef926b2e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a919402..fbaff1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2292,6 +2292,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-4937. ReplicationMonitor can infinite-loop in 
 BlockPlacementPolicyDefault#chooseRandom(). (kihwal)
 
+HDFS-9383. TestByteArrayManager#testByteArrayManager fails.
+(szetszwo via kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9383. TestByteArrayManager#testByteArrayManager fails. Contributed by Tsz Wo Nicholas Sze. (cherry picked from commit ef926b2e3824475581454c1e17a0d7c94529efde)

2015-11-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1c0c77e7f -> ceb062f09


HDFS-9383. TestByteArrayManager#testByteArrayManager fails. Contributed by Tsz 
Wo Nicholas Sze.
(cherry picked from commit ef926b2e3824475581454c1e17a0d7c94529efde)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ceb062f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ceb062f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ceb062f0

Branch: refs/heads/branch-2
Commit: ceb062f093b9d1b817a1012ce1ff0bee2492f89a
Parents: 1c0c77e
Author: Kihwal Lee 
Authored: Mon Nov 9 08:57:25 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 9 08:57:25 2015 -0600

--
 .../java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java| 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ceb062f0/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
index f5dd883..a8d5cef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
@@ -308,7 +308,7 @@ public class TestByteArrayManager {
   public void testByteArrayManager() throws Exception {
 final int countThreshold = 32;
 final int countLimit = 64;
-final long countResetTimePeriodMs = 1000L;
+final long countResetTimePeriodMs = 1L;
 final ByteArrayManager.Impl bam = new ByteArrayManager.Impl(
 new ByteArrayManager.Conf(
 countThreshold, countLimit, countResetTimePeriodMs));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ceb062f0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 779c925..a366428 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1443,6 +1443,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-4937. ReplicationMonitor can infinite-loop in 
 BlockPlacementPolicyDefault#chooseRandom(). (kihwal)
 
+HDFS-9383. TestByteArrayManager#testByteArrayManager fails.
+(szetszwo via kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-9383. TestByteArrayManager#testByteArrayManager fails. Contributed by Tsz Wo Nicholas Sze. (cherry picked from commit ef926b2e3824475581454c1e17a0d7c94529efde)

2015-11-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7e8467df9 -> 6f678deb8


HDFS-9383. TestByteArrayManager#testByteArrayManager fails. Contributed by Tsz 
Wo Nicholas Sze.
(cherry picked from commit ef926b2e3824475581454c1e17a0d7c94529efde)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f678deb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f678deb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f678deb

Branch: refs/heads/branch-2.7
Commit: 6f678deb866bd842df8b13c87bcbd70add354265
Parents: 7e8467d
Author: Kihwal Lee 
Authored: Mon Nov 9 08:58:58 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Nov 9 08:58:58 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f678deb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 65f7b61..d52253d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -18,6 +18,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-4937. ReplicationMonitor can infinite-loop in 
 BlockPlacementPolicyDefault#chooseRandom(). (kihwal)
 
+HDFS-9383. TestByteArrayManager#testByteArrayManager fails.
+(szetszwo via kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f678deb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
index 77a68c6..1a08950 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
@@ -309,7 +309,7 @@ public class TestByteArrayManager {
   public void testByteArrayManager() throws Exception {
 final int countThreshold = 32;
 final int countLimit = 64;
-final long countResetTimePeriodMs = 1000L;
+final long countResetTimePeriodMs = 1L;
 final ByteArrayManager.Impl bam = new ByteArrayManager.Impl(
 new ByteArrayManager.Conf(
 countThreshold, countLimit, countResetTimePeriodMs));
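
For context on the constant being changed: countResetTimePeriodMs in ByteArrayManager governs a time-windowed allocation counter. Roughly, if more than that many milliseconds pass between increments, the count starts over from zero. The generic pattern, sketched here independently of the HDFS classes and with illustrative names:

import org.apache.hadoop.util.Time;

/** Illustrative sketch: a counter that forgets its value after a quiet period. */
public class ExpiringCountSketch {

  private final long resetPeriodMs;
  private long count;
  private long lastIncrementMs;

  public ExpiringCountSketch(long resetPeriodMs) {
    this.resetPeriodMs = resetPeriodMs;
  }

  public synchronized long increment() {
    final long now = Time.monotonicNow();
    if (now - lastIncrementMs > resetPeriodMs) {
      count = 0;                            // quiet for too long: start counting again
    }
    lastIncrementMs = now;
    return ++count;
  }
}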



hadoop git commit: HDFS-9396. Total files and directories on jmx and web UI on standby is uninitialized. Contributed by Kihwal Lee.

2015-11-11 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6351d3fa6 -> a2a104f3d


HDFS-9396. Total files and directories on jmx and web UI on standby is 
uninitialized. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2a104f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2a104f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2a104f3

Branch: refs/heads/trunk
Commit: a2a104f3d30182953db6690304146f0a582ed241
Parents: 6351d3f
Author: Kihwal Lee 
Authored: Wed Nov 11 15:22:36 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 11 15:24:43 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSDirectory.java   |  8 +--
 .../hdfs/server/namenode/ha/TestHAMetrics.java  | 56 
 3 files changed, 60 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a104f3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 53d20cc..e6a5b71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2293,6 +2293,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9245. Fix findbugs warnings in hdfs-nfs/WriteCtx.
 (Mingliang Liu via xyao)
 
+HDFS-9396. Total files and directories on jmx and web UI on standby is
+uninitialized. (kihwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a104f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index cb1ac05..0f3011a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1218,13 +1218,7 @@ public class FSDirectory implements Closeable {
   }
 
   long totalInodes() {
-readLock();
-try {
-  return rootDir.getDirectoryWithQuotaFeature().getSpaceConsumed()
-  .getNameSpace();
-} finally {
-  readUnlock();
-}
+return getInodeMapSize();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a104f3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
index 6f9fc6e..432f7df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
@@ -24,8 +24,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Test;
@@ -119,4 +121,58 @@ public class TestHAMetrics {
   cluster.shutdown();
 }
   }
+
+  @Test
+  public void testHAInodeCount() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
+
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1)
+.build();
+FileSystem fs = null;
+try {
+  cluster.waitActive();
+
+  FSNamesystem nn0 = cluster.getNamesystem(0);
+  FSNamesystem nn1 = cluster.getNamesystem(1);
+
+  cluster.transitionToActive(0);
+  fs = HATestUtil.configureFailoverFs(cluster, conf);
+  DFSTestUtil.createFile(fs, new Path("/testHAInodeCount1"),
+  10, (short)1, 1L);
+  DFSTestUtil.createFile(fs, new Path("

hadoop git commit: HDFS-9396. Total files and directories on jmx and web UI on standby is uninitialized. Contributed by Kihwal Lee. (cherry picked from commit a2a104f3d30182953db6690304146f0a582ed241)

2015-11-11 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bb5dd7f77 -> 8726a5e02


HDFS-9396. Total files and directories on jmx and web UI on standby is 
uninitialized. Contributed by Kihwal Lee.
(cherry picked from commit a2a104f3d30182953db6690304146f0a582ed241)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8726a5e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8726a5e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8726a5e0

Branch: refs/heads/branch-2
Commit: 8726a5e02004bc5c7e30acb39982c23b09eaec10
Parents: bb5dd7f
Author: Kihwal Lee 
Authored: Wed Nov 11 15:37:11 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Nov 11 15:37:11 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSDirectory.java   |  8 +--
 .../hdfs/server/namenode/ha/TestHAMetrics.java  | 56 
 3 files changed, 60 insertions(+), 7 deletions(-)
--
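
In short, the quota-derived namespace count on the root directory may never be initialized on a standby NameNode (quota usage is only computed when quota initialization runs), so totalInodes() could report a bogus value there, while the inode map is kept current by edit replay on both NameNodes. A minimal, self-contained sketch of that idea, using stand-in classes rather than the real FSDirectory internals:

import java.util.concurrent.ConcurrentHashMap;

class InodeCountSketch {
  // Stand-in for the quota feature on the root directory; on a standby that
  // never runs quota initialization this stays at its initial value.
  static class RootDirQuota {
    long namespaceConsumed = 0;
  }

  private final RootDirQuota rootDirQuota = new RootDirQuota();
  // Stand-in for FSDirectory's inode map, which edit replay keeps current
  // on both the active and the standby NameNode.
  private final ConcurrentHashMap<Long, String> inodeMap = new ConcurrentHashMap<>();

  // Old behaviour: quota-derived count, misleading on an uninitialized standby.
  long totalInodesViaQuota() {
    return rootDirQuota.namespaceConsumed;
  }

  // New behaviour: size of the inode map, accurate wherever the map is kept.
  long totalInodes() {
    return inodeMap.size();
  }
}

The new testHAInodeCount test below appears to exercise exactly this path: create files through the active NameNode, let the standby tail the edits, and compare the reported totals.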


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8726a5e0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 87585d8..b3d73b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1444,6 +1444,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-9245. Fix findbugs warnings in hdfs-nfs/WriteCtx.
 (Mingliang Liu via xyao)
 
+HDFS-9396. Total files and directories on jmx and web UI on standby is
+uninitialized. (kihwal)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8726a5e0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 0df6053..52b2616 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1218,13 +1218,7 @@ public class FSDirectory implements Closeable {
   }
 
   long totalInodes() {
-readLock();
-try {
-  return rootDir.getDirectoryWithQuotaFeature().getSpaceConsumed()
-  .getNameSpace();
-} finally {
-  readUnlock();
-}
+return getInodeMapSize();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8726a5e0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
index 6f9fc6e..432f7df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
@@ -24,8 +24,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Test;
@@ -119,4 +121,58 @@ public class TestHAMetrics {
   cluster.shutdown();
 }
   }
+
+  @Test
+  public void testHAInodeCount() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);
+
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1)
+.build();
+FileSystem fs = null;
+try {
+  cluster.waitActive();
+
+  FSNamesystem nn0 = cluster.getNamesystem(0);
+  FSNamesystem nn1 = cluster.getNamesystem(1);
+
+  cluster.transitionToActive(0);
+  fs = HATestUtil.configureFailoverFs(cluster, conf);
+  DFSTestUtil.createFile(fs, new Path("/testHAInodeCount1"),
+  10,

hadoop git commit: HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed by Kihwal Lee)

2015-12-01 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 456426fe6 -> b70b380f3


HDFS-9426. Rollingupgrade finalization is not backward compatible (Contributed 
by Kihwal Lee)

(cherry picked from commit c62d42cd8bb09a5ffc0c5eefa2d87913e71b9e7e)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
(cherry picked from commit 9f256d1d716a7e17606245fcfc619901a8fa299a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b70b380f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b70b380f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b70b380f

Branch: refs/heads/branch-2.8
Commit: b70b380f37ff03d5b30b8aa2adb9e731fb898c5f
Parents: 456426f
Author: Kihwal Lee 
Authored: Tue Dec 1 16:43:15 2015 -0600
Committer: Kihwal Lee 
Committed: Tue Dec 1 16:43:27 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../DatanodeProtocolClientSideTranslatorPB.java|  5 -
 .../DatanodeProtocolServerSideTranslatorPB.java| 13 +++--
 .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto  |  1 +
 4 files changed, 19 insertions(+), 3 deletions(-)
--
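
The compatibility trick visible in the diff below: rather than changing the meaning of the existing rollingUpgradeStatus heartbeat field, the patch adds an optional rollingUpgradeStatusV2 field, and the client-side translator prefers V2 when present and falls back to the old field otherwise, so mixed-version clusters keep working while finalizing a rolling upgrade. A small self-contained sketch of that decode pattern (the classes here are stand-ins, not the generated protobuf types):

class HeartbeatDecodeSketch {
  static class RollingUpgradeStatus {
    final boolean finalized;
    RollingUpgradeStatus(boolean finalized) { this.finalized = finalized; }
  }

  // Stand-in for the protobuf heartbeat response: old field plus new V2 field.
  static class HeartbeatResponse {
    RollingUpgradeStatus legacyStatus; // set by old NameNodes
    RollingUpgradeStatus statusV2;     // set by new NameNodes
    boolean hasRollingUpgradeStatus()   { return legacyStatus != null; }
    boolean hasRollingUpgradeStatusV2() { return statusV2 != null; }
  }

  static RollingUpgradeStatus decode(HeartbeatResponse resp) {
    // Prefer the V2 field when present; otherwise fall back to the old one,
    // so a newer DataNode still understands an older NameNode's heartbeat.
    if (resp.hasRollingUpgradeStatusV2()) {
      return resp.statusV2;
    } else if (resp.hasRollingUpgradeStatus()) {
      return resp.legacyStatus;
    }
    return null; // no rolling upgrade in progress
  }
}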


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b70b380f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 586790f..bf4d620 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1649,6 +1649,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-6481. DatanodeManager#getDatanodeStorageInfos() should check the
 length of storageIDs. (szetszwo via Arpit Agarwal)
 
+HDFS-9426. Rollingupgrade finalization is not backward compatible
+    (Kihwal Lee via vinayakumarb)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b70b380f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 705d573..388680b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -163,7 +163,10 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   index++;
 }
 RollingUpgradeStatus rollingUpdateStatus = null;
-if (resp.hasRollingUpgradeStatus()) {
+// Use v2 semantics if available.
+if (resp.hasRollingUpgradeStatusV2()) {
+  rollingUpdateStatus = 
PBHelperClient.convert(resp.getRollingUpgradeStatusV2());
+} else if (resp.hasRollingUpgradeStatus()) {
   rollingUpdateStatus = 
PBHelperClient.convert(resp.getRollingUpgradeStatus());
 }
 return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b70b380f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 4b9f7c4..4f8f44f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+imp

hadoop git commit: HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by Walter Su.

2015-12-11 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk e363417e7 -> a48301791


HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by 
Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4830179
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4830179
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4830179

Branch: refs/heads/trunk
Commit: a48301791e9564363bc2abad4e89e344b0d7a5ff
Parents: e363417
Author: Kihwal Lee 
Authored: Fri Dec 11 08:44:47 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Dec 11 08:44:47 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 89 
 .../fsdataset/impl/TestFsDatasetImpl.java   |  4 +
 3 files changed, 59 insertions(+), 37 deletions(-)
--
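
The deadlock fix below is a classic lock-narrowing refactor: removeVolumes() used to hold the FsDatasetImpl monitor while it also invalidated blocks and notified other components, which can take their own locks; the patch collects the work under the dataset lock and performs the cross-component calls after releasing it. A minimal sketch of the pattern, with hypothetical helper names standing in for the real dataset internals:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class VolumeRemovalSketch {
  private final Object datasetLock = new Object();

  void removeVolumes(List<String> volumesToRemove) {
    Map<String, List<Long>> blocksToInvalidate = new HashMap<>();
    List<String> storagesToRemove = new ArrayList<>();

    synchronized (datasetLock) {
      // Phase 1 (under the lock): mutate only dataset-local state and
      // remember what must be reported, without calling other components.
      for (String volume : volumesToRemove) {
        storagesToRemove.add(volume);
        blocksToInvalidate.put(volume, detachReplicasOn(volume));
      }
    }

    // Phase 2 (outside the lock): notify components that may take their own
    // locks; with the dataset lock released, another thread acquiring the
    // same locks in the opposite order can no longer deadlock with this one.
    for (Map.Entry<String, List<Long>> e : blocksToInvalidate.entrySet()) {
      reportInvalidatedBlocks(e.getKey(), e.getValue());
    }
    for (String storage : storagesToRemove) {
      removeStorage(storage);
    }
  }

  // Hypothetical helpers; the real code works with FsVolumeImpl, ReplicaInfo
  // and DatanodeStorage objects instead of plain strings and block ids.
  private List<Long> detachReplicasOn(String volume) { return new ArrayList<>(); }
  private void reportInvalidatedBlocks(String volume, List<Long> blockIds) { }
  private void removeStorage(String storageUuid) { }
}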


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4830179/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1696053..c1a323b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2574,6 +2574,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9294. DFSClient deadlock when close file and failed to renew lease.
 (Brahma Reddy Battula via szetszwo)
 
+HDFS-9445. Datanode may deadlock while handling a bad volume.
+(Walter Su via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4830179/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 1d8c705..afa4dee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -470,48 +470,67 @@ class FsDatasetImpl implements FsDatasetSpi 
{
* Removes a set of volumes from FsDataset.
* @param volumesToRemove a set of absolute root path of each volume.
* @param clearFailure set true to clear failure information.
-   *
-   * DataNode should call this function before calling
-   * {@link DataStorage#removeVolumes(java.util.Collection)}.
*/
   @Override
-  public synchronized void removeVolumes(
-  Set<File> volumesToRemove, boolean clearFailure) {
+  public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
 // Make sure that all volumes are absolute path.
 for (File vol : volumesToRemove) {
   Preconditions.checkArgument(vol.isAbsolute(),
   String.format("%s is not absolute path.", vol.getPath()));
 }
-for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
-  Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-  final File absRoot = sd.getRoot().getAbsoluteFile();
-  if (volumesToRemove.contains(absRoot)) {
-LOG.info("Removing " + absRoot + " from FsDataset.");
-
-// Disable the volume from the service.
-asyncDiskService.removeVolume(sd.getCurrentDir());
-volumes.removeVolume(absRoot, clearFailure);
-
-// Removed all replica information for the blocks on the volume. Unlike
-// updating the volumeMap in addVolume(), this operation does not scan
-// disks.
-for (String bpid : volumeMap.getBlockPoolList()) {
-  for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-   it.hasNext(); ) {
-ReplicaInfo block = it.next();
-final File absBasePath =
-new File(block.getVolume().getBasePath()).getAbsoluteFile();
-if (absBasePath.equals(absRoot)) {
-  invalidate(bpid, block);
-  it.remove();
+
+Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
+List<String> storageToRemove = new ArrayList<>();
+synchronized (this) {
+  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
+Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
+final File absRoot = sd.getRoot().getAbsoluteFile();
+if (volumesToRemove.contains(absRoot)) {
+  LOG.info("Removing " + absRoot + " from FsDataset.");
+
+  // Disable the volume f

hadoop git commit: HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by Walter Su. (cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)

2015-12-11 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7dc558b6a -> a72ef921f


HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by 
Walter Su.
(cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a72ef921
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a72ef921
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a72ef921

Branch: refs/heads/branch-2
Commit: a72ef921f00f6536bcb3e09ef5bd147cd8890065
Parents: 7dc558b
Author: Kihwal Lee 
Authored: Fri Dec 11 08:46:03 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Dec 11 08:46:03 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 89 
 .../fsdataset/impl/TestFsDatasetImpl.java   |  4 +
 3 files changed, 59 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72ef921/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3fd5231..d2eed68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1709,6 +1709,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9294. DFSClient deadlock when close file and failed to renew lease.
 (Brahma Reddy Battula via szetszwo)
 
+HDFS-9445. Datanode may deadlock while handling a bad volume.
+(Walter Su via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72ef921/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index a896287..cb5af93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -475,48 +475,67 @@ class FsDatasetImpl implements FsDatasetSpi 
{
* Removes a set of volumes from FsDataset.
* @param volumesToRemove a set of absolute root path of each volume.
* @param clearFailure set true to clear failure information.
-   *
-   * DataNode should call this function before calling
-   * {@link DataStorage#removeVolumes(java.util.Collection)}.
*/
   @Override
-  public synchronized void removeVolumes(
-  Set<File> volumesToRemove, boolean clearFailure) {
+  public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
 // Make sure that all volumes are absolute path.
 for (File vol : volumesToRemove) {
   Preconditions.checkArgument(vol.isAbsolute(),
   String.format("%s is not absolute path.", vol.getPath()));
 }
-for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
-  Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-  final File absRoot = sd.getRoot().getAbsoluteFile();
-  if (volumesToRemove.contains(absRoot)) {
-LOG.info("Removing " + absRoot + " from FsDataset.");
-
-// Disable the volume from the service.
-asyncDiskService.removeVolume(sd.getCurrentDir());
-volumes.removeVolume(absRoot, clearFailure);
-
-// Removed all replica information for the blocks on the volume. Unlike
-// updating the volumeMap in addVolume(), this operation does not scan
-// disks.
-for (String bpid : volumeMap.getBlockPoolList()) {
-  for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-   it.hasNext(); ) {
-ReplicaInfo block = it.next();
-final File absBasePath =
-new File(block.getVolume().getBasePath()).getAbsoluteFile();
-if (absBasePath.equals(absRoot)) {
-  invalidate(bpid, block);
-  it.remove();
+
+Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
+List<String> storageToRemove = new ArrayList<>();
+synchronized (this) {
+  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
+Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
+final File absRoot = sd.getRoot().getAbsoluteFile();
+if (volumesToRemove.contains(absRoot)) {
+  LOG.info("Removing " + absRoo

hadoop git commit: HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by Walter Su. (cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)

2015-12-11 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d02829541 -> 937a43dc2


HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by 
Walter Su.
(cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/937a43dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/937a43dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/937a43dc

Branch: refs/heads/branch-2.8
Commit: 937a43dc29d91f27bbd6f15182fdfcd3583cf2fb
Parents: d028295
Author: Kihwal Lee 
Authored: Fri Dec 11 08:46:42 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Dec 11 08:46:42 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 89 
 .../fsdataset/impl/TestFsDatasetImpl.java   |  4 +
 3 files changed, 59 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/937a43dc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 629d330..0d71ba1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1671,6 +1671,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9294. DFSClient deadlock when close file and failed to renew lease.
 (Brahma Reddy Battula via szetszwo)
 
+HDFS-9445. Datanode may deadlock while handling a bad volume.
+(Walter Su via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/937a43dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 37b6b9e..8fb06bf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -475,48 +475,67 @@ class FsDatasetImpl implements FsDatasetSpi 
{
* Removes a set of volumes from FsDataset.
* @param volumesToRemove a set of absolute root path of each volume.
* @param clearFailure set true to clear failure information.
-   *
-   * DataNode should call this function before calling
-   * {@link DataStorage#removeVolumes(java.util.Collection)}.
*/
   @Override
-  public synchronized void removeVolumes(
-  Set<File> volumesToRemove, boolean clearFailure) {
+  public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
 // Make sure that all volumes are absolute path.
 for (File vol : volumesToRemove) {
   Preconditions.checkArgument(vol.isAbsolute(),
   String.format("%s is not absolute path.", vol.getPath()));
 }
-for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
-  Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-  final File absRoot = sd.getRoot().getAbsoluteFile();
-  if (volumesToRemove.contains(absRoot)) {
-LOG.info("Removing " + absRoot + " from FsDataset.");
-
-// Disable the volume from the service.
-asyncDiskService.removeVolume(sd.getCurrentDir());
-volumes.removeVolume(absRoot, clearFailure);
-
-// Removed all replica information for the blocks on the volume. Unlike
-// updating the volumeMap in addVolume(), this operation does not scan
-// disks.
-for (String bpid : volumeMap.getBlockPoolList()) {
-  for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-   it.hasNext(); ) {
-ReplicaInfo block = it.next();
-final File absBasePath =
-new File(block.getVolume().getBasePath()).getAbsoluteFile();
-if (absBasePath.equals(absRoot)) {
-  invalidate(bpid, block);
-  it.remove();
+
+Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
+List<String> storageToRemove = new ArrayList<>();
+synchronized (this) {
+  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
+Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
+final File absRoot = sd.getRoot().getAbsoluteFile();
+if (volumesToRemove.contains(absRoot)) {
+  LOG.info("Removing " + a

hadoop git commit: HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by Walter Su. (cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)

2015-12-11 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 02f666b32 -> a02312546


HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by 
Walter Su.
(cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0231254
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0231254
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0231254

Branch: refs/heads/branch-2.7
Commit: a02312546a60372c108f8671516b29efe541a196
Parents: 02f666b
Author: Kihwal Lee 
Authored: Fri Dec 11 08:47:22 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Dec 11 08:47:22 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 89 
 .../fsdataset/impl/TestFsDatasetImpl.java   |  4 +
 3 files changed, 59 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0231254/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f4e01ee..f0a083d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -135,6 +135,9 @@ Release 2.7.2 - UNRELEASED
 HDFS-9294. DFSClient deadlock when close file and failed to renew lease.
 (Brahma Reddy Battula via szetszwo)
 
+HDFS-9445. Datanode may deadlock while handling a bad volume.
+(Walter Su via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0231254/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index dc773fe..fdee4e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -450,48 +450,67 @@ class FsDatasetImpl implements FsDatasetSpi 
{
* Removes a set of volumes from FsDataset.
* @param volumesToRemove a set of absolute root path of each volume.
* @param clearFailure set true to clear failure information.
-   *
-   * DataNode should call this function before calling
-   * {@link DataStorage#removeVolumes(java.util.Collection)}.
*/
   @Override
-  public synchronized void removeVolumes(
-  Set<File> volumesToRemove, boolean clearFailure) {
+  public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
 // Make sure that all volumes are absolute path.
 for (File vol : volumesToRemove) {
   Preconditions.checkArgument(vol.isAbsolute(),
   String.format("%s is not absolute path.", vol.getPath()));
 }
-for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
-  Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-  final File absRoot = sd.getRoot().getAbsoluteFile();
-  if (volumesToRemove.contains(absRoot)) {
-LOG.info("Removing " + absRoot + " from FsDataset.");
-
-// Disable the volume from the service.
-asyncDiskService.removeVolume(sd.getCurrentDir());
-volumes.removeVolume(absRoot, clearFailure);
-
-// Removed all replica information for the blocks on the volume. Unlike
-// updating the volumeMap in addVolume(), this operation does not scan
-// disks.
-for (String bpid : volumeMap.getBlockPoolList()) {
-  for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-   it.hasNext(); ) {
-ReplicaInfo block = it.next();
-final File absBasePath =
-new File(block.getVolume().getBasePath()).getAbsoluteFile();
-if (absBasePath.equals(absRoot)) {
-  invalidate(bpid, block);
-  it.remove();
+
+Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
+List<String> storageToRemove = new ArrayList<>();
+synchronized (this) {
+  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
+Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
+final File absRoot = sd.getRoot().getAbsoluteFile();
+if (volumesToRemove.contains(absRoot)) {
+  LOG.info("Removing " + a

hadoop git commit: HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by Walter Su. (cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)

2015-12-11 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7.2 b90d4d331 -> 574f73403


HDFS-9445. Datanode may deadlock while handling a bad volume. Contributed by 
Walter Su.
(cherry picked from commit a48301791e9564363bc2abad4e89e344b0d7a5ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/574f7340
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/574f7340
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/574f7340

Branch: refs/heads/branch-2.7.2
Commit: 574f73403fffa2fb51d1a8f8ef0385a5175e266f
Parents: b90d4d3
Author: Kihwal Lee 
Authored: Fri Dec 11 08:48:29 2015 -0600
Committer: Kihwal Lee 
Committed: Fri Dec 11 08:48:29 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 89 
 .../fsdataset/impl/TestFsDatasetImpl.java   |  4 +
 3 files changed, 59 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/574f7340/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f47efec..aa621c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -101,6 +101,9 @@ Release 2.7.2 - 2015-11-11
 HDFS-9294. DFSClient deadlock when close file and failed to renew lease.
 (Brahma Reddy Battula via szetszwo)
 
+HDFS-9445. Datanode may deadlock while handling a bad volume.
+(Walter Su via kihwal)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/574f7340/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index dc773fe..fdee4e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -450,48 +450,67 @@ class FsDatasetImpl implements FsDatasetSpi 
{
* Removes a set of volumes from FsDataset.
* @param volumesToRemove a set of absolute root path of each volume.
* @param clearFailure set true to clear failure information.
-   *
-   * DataNode should call this function before calling
-   * {@link DataStorage#removeVolumes(java.util.Collection)}.
*/
   @Override
-  public synchronized void removeVolumes(
-  Set<File> volumesToRemove, boolean clearFailure) {
+  public void removeVolumes(Set<File> volumesToRemove, boolean clearFailure) {
 // Make sure that all volumes are absolute path.
 for (File vol : volumesToRemove) {
   Preconditions.checkArgument(vol.isAbsolute(),
   String.format("%s is not absolute path.", vol.getPath()));
 }
-for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
-  Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-  final File absRoot = sd.getRoot().getAbsoluteFile();
-  if (volumesToRemove.contains(absRoot)) {
-LOG.info("Removing " + absRoot + " from FsDataset.");
-
-// Disable the volume from the service.
-asyncDiskService.removeVolume(sd.getCurrentDir());
-volumes.removeVolume(absRoot, clearFailure);
-
-// Removed all replica information for the blocks on the volume. Unlike
-// updating the volumeMap in addVolume(), this operation does not scan
-// disks.
-for (String bpid : volumeMap.getBlockPoolList()) {
-  for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
-   it.hasNext(); ) {
-ReplicaInfo block = it.next();
-final File absBasePath =
-new File(block.getVolume().getBasePath()).getAbsoluteFile();
-if (absBasePath.equals(absRoot)) {
-  invalidate(bpid, block);
-  it.remove();
+
+Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
+List<String> storageToRemove = new ArrayList<>();
+synchronized (this) {
+  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
+Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
+final File absRoot = sd.getRoot().getAbsoluteFile();
+if (volumesToRemove.contains(absRoot)) {
+  LOG.info("Removing &qu

hadoop git commit: HDFS-9569. Log the name of the fsimage being loaded for better supportability. Contributed by Yongjun Zhang.

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk d90625e03 -> eb6939cea


HDFS-9569.  Log the name of the fsimage being loaded for better supportability. 
Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb6939ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb6939ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb6939ce

Branch: refs/heads/trunk
Commit: eb6939cea0343840c62b930d4adb377f5eaf879f
Parents: d90625e
Author: Kihwal Lee 
Authored: Thu Dec 17 10:29:41 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 10:29:41 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSImage.java   | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--
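
Two small changes are visible below: the name of the fsimage being loaded is now logged at INFO rather than DEBUG, and the loader catches Exception instead of only IOException, so an unexpected RuntimeException from one corrupt image no longer aborts the attempt to load the next candidate. A tiny sketch of that retry-over-candidates shape (method names are illustrative, not the real FSImage API):

import java.util.List;

class ImageLoadSketch {
  String loadFirstUsable(List<String> imageFiles) {
    for (String imageFile : imageFiles) {
      try {
        System.out.println("Planning to load image: " + imageFile); // INFO breadcrumb
        load(imageFile);
        return imageFile; // first image that loads cleanly wins
      } catch (Exception e) { // broader than IOException on purpose
        System.err.println("Failed to load image from " + imageFile + ": " + e);
      }
    }
    return null; // nothing usable; caller decides how to fail
  }

  private void load(String imageFile) throws Exception {
    // Stand-in for the real fsimage parsing.
  }
}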


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb6939ce/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cf2e77c..c8e5748 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2506,6 +2506,9 @@ Release 2.7.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-9569. Log the name of the fsimage being loaded for better
+supportability (Yongjun Zhang via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb6939ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index dedbb32..3d58a7d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -671,8 +671,8 @@ public class FSImage implements Closeable {
 imageFile = imageFiles.get(i);
 loadFSImageFile(target, recovery, imageFile, startOpt);
 break;
-  } catch (IOException ioe) {
-LOG.error("Failed to load image from " + imageFile, ioe);
+  } catch (Exception e) {
+LOG.error("Failed to load image from " + imageFile, e);
 target.clear();
 imageFile = null;
   }
@@ -721,7 +721,7 @@ public class FSImage implements Closeable {
 
   void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
   FSImageFile imageFile, StartupOption startupOption) throws IOException {
-LOG.debug("Planning to load image :\n" + imageFile);
+LOG.info("Planning to load image: " + imageFile);
 StorageDirectory sdForProperties = imageFile.sd;
 storage.readProperties(sdForProperties, startupOption);
 



hadoop git commit: HDFS-9569. Log the name of the fsimage being loaded for better supportability. Contributed by Yongjun Zhang. (cherry picked from commit eb6939cea0343840c62b930d4adb377f5eaf879f)

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3d0222eaf -> 03e10f1fb


HDFS-9569.  Log the name of the fsimage being loaded for better supportability. 
Contributed by Yongjun Zhang.
(cherry picked from commit eb6939cea0343840c62b930d4adb377f5eaf879f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03e10f1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03e10f1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03e10f1f

Branch: refs/heads/branch-2
Commit: 03e10f1fb3fade650cb054ddf5f3a152871b8c0f
Parents: 3d0222e
Author: Kihwal Lee 
Authored: Thu Dec 17 10:30:58 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 10:30:58 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSImage.java   | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03e10f1f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fa70307..268a589 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1644,6 +1644,9 @@ Release 2.7.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-9569. Log the name of the fsimage being loaded for better
+supportability (Yongjun Zhang via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03e10f1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 085c0f8..224d2c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -671,8 +671,8 @@ public class FSImage implements Closeable {
 imageFile = imageFiles.get(i);
 loadFSImageFile(target, recovery, imageFile, startOpt);
 break;
-  } catch (IOException ioe) {
-LOG.error("Failed to load image from " + imageFile, ioe);
+  } catch (Exception e) {
+LOG.error("Failed to load image from " + imageFile, e);
 target.clear();
 imageFile = null;
   }
@@ -725,7 +725,7 @@ public class FSImage implements Closeable {
 
   void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
   FSImageFile imageFile, StartupOption startupOption) throws IOException {
-LOG.debug("Planning to load image :\n" + imageFile);
+LOG.info("Planning to load image: " + imageFile);
 StorageDirectory sdForProperties = imageFile.sd;
 storage.readProperties(sdForProperties, startupOption);
 



hadoop git commit: HDFS-9569. Log the name of the fsimage being loaded for better supportability. Contributed by Yongjun Zhang. (cherry picked from commit eb6939cea0343840c62b930d4adb377f5eaf879f)

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 92de57bdc -> a8dc35dfd


HDFS-9569.  Log the name of the fsimage being loaded for better supportability. 
Contributed by Yongjun Zhang.
(cherry picked from commit eb6939cea0343840c62b930d4adb377f5eaf879f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8dc35df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8dc35df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8dc35df

Branch: refs/heads/branch-2.8
Commit: a8dc35dfdf9f3ab82d88a7ef2d03e172d594499b
Parents: 92de57b
Author: Kihwal Lee 
Authored: Thu Dec 17 10:31:37 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 10:31:37 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSImage.java   | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8dc35df/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b4d5a8b..cd79c9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1596,6 +1596,9 @@ Release 2.7.3 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-9569. Log the name of the fsimage being loaded for better
+supportability (Yongjun Zhang via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8dc35df/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 085c0f8..224d2c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -671,8 +671,8 @@ public class FSImage implements Closeable {
 imageFile = imageFiles.get(i);
 loadFSImageFile(target, recovery, imageFile, startOpt);
 break;
-  } catch (IOException ioe) {
-LOG.error("Failed to load image from " + imageFile, ioe);
+  } catch (Exception e) {
+LOG.error("Failed to load image from " + imageFile, e);
 target.clear();
 imageFile = null;
   }
@@ -725,7 +725,7 @@ public class FSImage implements Closeable {
 
   void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
   FSImageFile imageFile, StartupOption startupOption) throws IOException {
-LOG.debug("Planning to load image :\n" + imageFile);
+LOG.info("Planning to load image: " + imageFile);
 StorageDirectory sdForProperties = imageFile.sd;
 storage.readProperties(sdForProperties, startupOption);
 



hadoop git commit: HDFS-9569. Log the name of the fsimage being loaded for better supportability. Contributed by Yongjun Zhang. (cherry picked from commit eb6939cea0343840c62b930d4adb377f5eaf879f)

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 face83775 -> 4d2ee51c5


HDFS-9569.  Log the name of the fsimage being loaded for better supportability. 
Contributed by Yongjun Zhang.
(cherry picked from commit eb6939cea0343840c62b930d4adb377f5eaf879f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d2ee51c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d2ee51c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d2ee51c

Branch: refs/heads/branch-2.7
Commit: 4d2ee51c52d371a10501a76f5bfc2d78a40e2083
Parents: face837
Author: Kihwal Lee 
Authored: Thu Dec 17 10:33:15 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 10:33:15 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/hdfs/server/namenode/FSImage.java   | 6 +++---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d2ee51c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b7b3cda..f95eada 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -11,6 +11,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-8647. Abstract BlockManager's rack policy into BlockPlacementPolicy.
 (Brahma Reddy Battula via mingma)
 
+HDFS-9569. Log the name of the fsimage being loaded for better
+supportability (Yongjun Zhang via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d2ee51c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 5a7fbb1..475ae7e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -675,8 +675,8 @@ public class FSImage implements Closeable {
 imageFile = imageFiles.get(i);
 loadFSImageFile(target, recovery, imageFile, startOpt);
 break;
-  } catch (IOException ioe) {
-LOG.error("Failed to load image from " + imageFile, ioe);
+  } catch (Exception e) {
+LOG.error("Failed to load image from " + imageFile, e);
 target.clear();
 imageFile = null;
   }
@@ -727,7 +727,7 @@ public class FSImage implements Closeable {
 
   void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
   FSImageFile imageFile, StartupOption startupOption) throws IOException {
-LOG.debug("Planning to load image :\n" + imageFile);
+LOG.info("Planning to load image: " + imageFile);
 StorageDirectory sdForProperties = imageFile.sd;
 storage.readProperties(sdForProperties, startupOption);
 



hadoop git commit: HDFS-9533. seen_txid in the shared edits directory is modified during bootstrapping. Contributed by Kihwal Lee.

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 03bab8dea -> 5cb1e0118


HDFS-9533. seen_txid in the shared edits directory is modified during 
bootstrapping. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cb1e011
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cb1e011
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cb1e011

Branch: refs/heads/trunk
Commit: 5cb1e0118b173a95c1f7bdfae1e58d7833d61c26
Parents: 03bab8d
Author: Kihwal Lee 
Authored: Thu Dec 17 17:11:16 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 17:11:16 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/namenode/NNStorage.java| 18 +-
 .../hdfs/server/namenode/ha/BootstrapStandby.java |  6 --
 .../hdfs/server/namenode/FSImageTestUtil.java |  7 +++
 .../server/namenode/ha/TestBootstrapStandby.java  | 11 +++
 5 files changed, 42 insertions(+), 3 deletions(-)
--
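
The shape of the fix, as far as the diff shows: writeTransactionIdFileToStorage() gains an overload that takes a NameNodeDirType, so bootstrapStandby can write the seen_txid marker only to its local image directories and leave the shared edits directory untouched; passing null preserves the old write-everywhere behaviour. A compact sketch of that filter (enum and field names are simplified stand-ins for the real NNStorage types):

import java.util.List;

class TxidMarkerSketch {
  enum DirType { IMAGE, EDITS, SHARED_EDITS }

  static class StorageDir {
    final DirType type;
    long seenTxid;
    StorageDir(DirType type) { this.type = type; }
  }

  private final List<StorageDir> dirs;
  TxidMarkerSketch(List<StorageDir> dirs) { this.dirs = dirs; }

  void writeTransactionIdFileToStorage(long txid) {
    writeTransactionIdFileToStorage(txid, null); // old behaviour: all dirs
  }

  void writeTransactionIdFileToStorage(long txid, DirType only) {
    for (StorageDir sd : dirs) {
      if (only != null && sd.type != only) {
        continue; // e.g. skip SHARED_EDITS when bootstrapping a standby
      }
      sd.seenTxid = txid; // stand-in for writing the seen_txid file
    }
  }
}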


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cb1e011/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fc9dfe9..f896544 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2540,6 +2540,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9516. Truncate file fails with data dirs on multiple disks.
 (Plamen Jeliazkov via shv)
 
+HDFS-9533. seen_txid in the shared edits directory is modified during
+bootstrapping (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cb1e011/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 9b63e72..ed36b27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -482,8 +482,24 @@ public class NNStorage extends Storage implements 
Closeable,
* @param txid the txid that has been reached
*/
   public void writeTransactionIdFileToStorage(long txid) {
+writeTransactionIdFileToStorage(txid, null);
+  }
+
+  /**
+   * Write a small file in all available storage directories that
+   * indicates that the namespace has reached some given transaction ID.
+   *
+   * This is used when the image is loaded to avoid accidental rollbacks
+   * in the case where an edit log is fully deleted but there is no
+   * checkpoint. See TestNameEditsConfigs.testNameEditsConfigsFailure()
+   * @param txid the txid that has been reached
+   * @param type the type of directory
+   */
+  public void writeTransactionIdFileToStorage(long txid,
+  NameNodeDirType type) {
 // Write txid marker in all storage directories
-for (StorageDirectory sd : storageDirs) {
+for (Iterator<StorageDirectory> it = dirIterator(type); it.hasNext();) {
+  StorageDirectory sd = it.next();
   try {
 writeTransactionIdFile(sd, txid);
   } catch(IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cb1e011/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index f694a7f..d84b0fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -51,6 +51,7 @@ import 
org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
@@ -329,13 +330

hadoop git commit: HDFS-9533. seen_txid in the shared edits directory is modified during bootstrapping. Contributed by Kihwal Lee. (cherry picked from commit 5cb1e0118b173a95c1f7bdfae1e58d7833d61c26)

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d2914a69 -> 3c0ae5914


HDFS-9533. seen_txid in the shared edits directory is modified during 
bootstrapping. Contributed by Kihwal Lee.
(cherry picked from commit 5cb1e0118b173a95c1f7bdfae1e58d7833d61c26)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c0ae591
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c0ae591
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c0ae591

Branch: refs/heads/branch-2
Commit: 3c0ae5914ca673b055a9673b4c17c8aaa570c258
Parents: 6d2914a
Author: Kihwal Lee 
Authored: Thu Dec 17 17:22:26 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 17:23:06 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/namenode/NNStorage.java| 18 +-
 .../hdfs/server/namenode/ha/BootstrapStandby.java |  6 --
 .../hdfs/server/namenode/FSImageTestUtil.java |  7 +++
 .../server/namenode/ha/TestBootstrapStandby.java  | 11 +++
 5 files changed, 42 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0ae591/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index db4f565..40ef24c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1677,6 +1677,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9516. Truncate file fails with data dirs on multiple disks.
 (Plamen Jeliazkov via shv)
 
+HDFS-9533. seen_txid in the shared edits directory is modified during
+bootstrapping (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0ae591/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 9b63e72..ed36b27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -482,8 +482,24 @@ public class NNStorage extends Storage implements 
Closeable,
* @param txid the txid that has been reached
*/
   public void writeTransactionIdFileToStorage(long txid) {
+writeTransactionIdFileToStorage(txid, null);
+  }
+
+  /**
+   * Write a small file in all available storage directories that
+   * indicates that the namespace has reached some given transaction ID.
+   *
+   * This is used when the image is loaded to avoid accidental rollbacks
+   * in the case where an edit log is fully deleted but there is no
+   * checkpoint. See TestNameEditsConfigs.testNameEditsConfigsFailure()
+   * @param txid the txid that has been reached
+   * @param type the type of directory
+   */
+  public void writeTransactionIdFileToStorage(long txid,
+  NameNodeDirType type) {
 // Write txid marker in all storage directories
-for (StorageDirectory sd : storageDirs) {
+for (Iterator<StorageDirectory> it = dirIterator(type); it.hasNext();) {
+  StorageDirectory sd = it.next();
   try {
 writeTransactionIdFile(sd, txid);
   } catch(IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c0ae591/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 0311708..213141c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 imp

hadoop git commit: HDFS-9533. seen_txid in the shared edits directory is modified during bootstrapping. Contributed by Kihwal Lee. (cherry picked from commit 5cb1e0118b173a95c1f7bdfae1e58d7833d61c26)

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8812be3dc -> e5e5ba495


HDFS-9533. seen_txid in the shared edits directory is modified during 
bootstrapping. Contributed by Kihwal Lee.
(cherry picked from commit 5cb1e0118b173a95c1f7bdfae1e58d7833d61c26)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
(cherry picked from commit 3c0ae5914ca673b055a9673b4c17c8aaa570c258)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5e5ba49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5e5ba49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5e5ba49

Branch: refs/heads/branch-2.8
Commit: e5e5ba495f8f68c546b70c5e8ad2c81ebfb27588
Parents: 8812be3
Author: Kihwal Lee 
Authored: Thu Dec 17 17:23:55 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 17:23:55 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/namenode/NNStorage.java| 18 +-
 .../hdfs/server/namenode/ha/BootstrapStandby.java |  6 --
 .../hdfs/server/namenode/FSImageTestUtil.java |  7 +++
 .../server/namenode/ha/TestBootstrapStandby.java  | 11 +++
 5 files changed, 42 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e5ba49/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e8b9602..e0fe04e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1629,6 +1629,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9516. Truncate file fails with data dirs on multiple disks.
 (Plamen Jeliazkov via shv)
 
+HDFS-9533. seen_txid in the shared edits directory is modified during
+bootstrapping (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e5ba49/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 9b63e72..ed36b27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -482,8 +482,24 @@ public class NNStorage extends Storage implements 
Closeable,
* @param txid the txid that has been reached
*/
   public void writeTransactionIdFileToStorage(long txid) {
+writeTransactionIdFileToStorage(txid, null);
+  }
+
+  /**
+   * Write a small file in all available storage directories that
+   * indicates that the namespace has reached some given transaction ID.
+   *
+   * This is used when the image is loaded to avoid accidental rollbacks
+   * in the case where an edit log is fully deleted but there is no
+   * checkpoint. See TestNameEditsConfigs.testNameEditsConfigsFailure()
+   * @param txid the txid that has been reached
+   * @param type the type of directory
+   */
+  public void writeTransactionIdFileToStorage(long txid,
+  NameNodeDirType type) {
 // Write txid marker in all storage directories
-for (StorageDirectory sd : storageDirs) {
+for (Iterator<StorageDirectory> it = dirIterator(type); it.hasNext();) {
+  StorageDirectory sd = it.next();
   try {
 writeTransactionIdFile(sd, txid);
   } catch(IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e5ba49/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 0311708..213141c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesys
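The NNStorage hunk above adds an overload of writeTransactionIdFileToStorage() that takes a NameNodeDirType, so the seen_txid marker can be written only to storage directories of a matching type instead of to every directory, shared edits included. The BootstrapStandby part of the diff is cut off here, so the following is only a minimal, self-contained sketch of that filtering idea; the DirType enum, StorageDir record, and main() scaffolding are illustrative stand-ins, not Hadoop classes.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

// Illustrative stand-in for NNStorage's per-type filtering; not Hadoop code.
public class SeenTxidSketch {
  enum DirType { IMAGE, EDITS }                        // loosely mirrors NameNodeDirType roles

  record StorageDir(Path root, DirType type) {}

  // Write the seen_txid marker only into directories of the requested type
  // (a null type means "all directories", the old behavior).
  static void writeTxidMarker(List<StorageDir> dirs, long txid, DirType type) throws IOException {
    for (StorageDir sd : dirs) {
      if (type != null && sd.type() != type) {
        continue;                                      // e.g. skip the shared edits directory
      }
      Files.writeString(sd.root().resolve("seen_txid"),
          Long.toString(txid), StandardCharsets.UTF_8);
    }
  }

  public static void main(String[] args) throws IOException {
    Path tmp = Files.createTempDirectory("nnstorage");
    List<StorageDir> dirs = List.of(
        new StorageDir(Files.createDirectory(tmp.resolve("name")), DirType.IMAGE),
        new StorageDir(Files.createDirectory(tmp.resolve("shared-edits")), DirType.EDITS));
    writeTxidMarker(dirs, 42L, DirType.IMAGE);         // the EDITS directory is left untouched
    System.out.println("seen_txid in shared-edits? "
        + Files.exists(tmp.resolve("shared-edits").resolve("seen_txid")));  // prints false
  }
}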

hadoop git commit: HDFS-9533. seen_txid in the shared edits directory is modified during bootstrapping. Contributed by Kihwal Lee. (cherry picked from commit 5cb1e0118b173a95c1f7bdfae1e58d7833d61c26)

2015-12-17 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 56882e7b1 -> 03292b234


HDFS-9533. seen_txid in the shared edits directory is modified during 
bootstrapping. Contributed by Kihwal Lee.
(cherry picked from commit 5cb1e0118b173a95c1f7bdfae1e58d7833d61c26)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
(cherry picked from commit 3c0ae5914ca673b055a9673b4c17c8aaa570c258)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03292b23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03292b23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03292b23

Branch: refs/heads/branch-2.7
Commit: 03292b234e9bb2c7867accbf93a7d11cc894b629
Parents: 56882e7
Author: Kihwal Lee 
Authored: Thu Dec 17 17:28:24 2015 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 17 17:28:24 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hadoop/hdfs/server/namenode/NNStorage.java| 18 +-
 .../hdfs/server/namenode/ha/BootstrapStandby.java |  6 --
 .../hdfs/server/namenode/FSImageTestUtil.java |  7 +++
 .../server/namenode/ha/TestBootstrapStandby.java  | 12 
 5 files changed, 43 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03292b23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b7b3cda..dcaff35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -36,6 +36,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9516. Truncate file fails with data dirs on multiple disks.
 (Plamen Jeliazkov via shv)
 
+HDFS-9533. seen_txid in the shared edits directory is modified during
+bootstrapping (kihwal)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03292b23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index dbb2c50..98699e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -473,8 +473,24 @@ public class NNStorage extends Storage implements 
Closeable,
* @param txid the txid that has been reached
*/
   public void writeTransactionIdFileToStorage(long txid) {
+writeTransactionIdFileToStorage(txid, null);
+  }
+
+  /**
+   * Write a small file in all available storage directories that
+   * indicates that the namespace has reached some given transaction ID.
+   *
+   * This is used when the image is loaded to avoid accidental rollbacks
+   * in the case where an edit log is fully deleted but there is no
+   * checkpoint. See TestNameEditsConfigs.testNameEditsConfigsFailure()
+   * @param txid the txid that has been reached
+   * @param type the type of directory
+   */
+  public void writeTransactionIdFileToStorage(long txid,
+  NameNodeDirType type) {
 // Write txid marker in all storage directories
-for (StorageDirectory sd : storageDirs) {
+for (Iterator<StorageDirectory> it = dirIterator(type); it.hasNext();) {
+  StorageDirectory sd = it.next();
   try {
 writeTransactionIdFile(sd, txid);
   } catch(IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03292b23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 4bd53a5..2d83cba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesys

hadoop git commit: HDFS-7163. WebHdfsFileSystem should retry reads according to the configured retry policy. Contributed by Eric Payne.

2015-12-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5c0ff6961 -> 867048c3e


HDFS-7163. WebHdfsFileSystem should retry reads according to the configured 
retry policy. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/867048c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/867048c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/867048c3

Branch: refs/heads/trunk
Commit: 867048c3e4b20ece0039a876def129fa5eb9234f
Parents: 5c0ff69
Author: Kihwal Lee 
Authored: Tue Dec 22 14:08:23 2015 -0600
Committer: Kihwal Lee 
Committed: Tue Dec 22 14:08:23 2015 -0600

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 365 ++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |   4 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |   2 +-
 .../hdfs/server/namenode/TestAuditLogs.java |   2 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 158 
 .../hdfs/web/TestWebHdfsFileSystemContract.java |   2 +-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java  |   6 +-
 8 files changed, 526 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/867048c3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 4049b80..1fa95c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -34,8 +36,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
 
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -44,6 +49,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
@@ -545,7 +551,7 @@ public class WebHdfsFileSystem extends FileSystem
  * Also implements two-step connects for other operations redirected to
  * a DN such as open and checksum
  */
-private HttpURLConnection connect(URL url) throws IOException {
+protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
   String redirectHost = null;
 
@@ -698,7 +704,7 @@ public class WebHdfsFileSystem extends FileSystem
*/
   abstract class AbstractFsPathRunner extends AbstractRunner {
 private final Path fspath;
-private final Param[] parameters;
+private Param[] parameters;
 
 AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
 Param... parameters) {
@@ -714,6 +720,10 @@ public class WebHdfsFileSystem extends FileSystem
   this.parameters = parameters;
 }
 
+protected void updateURLParameters(Param... p) {
+  this.parameters = p;
+}
+
 @Override
 protected URL getUrl() throws IOException {
   if (excludeDatanodes.getValue() != null) {
@@ -1235,15 +1245,10 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
-  public FSDataInputStream open(final Path f, final int buffersize
+  public FSDataInputStream open(final Path f, final int bufferSize
   ) throws IOException {
 statistics.incrementReadOps(1);
-final HttpOpParam.Op op = GetOpParam.Op.OPEN;
-// use a runner so the open can recover from an invalid token
-FsPathConnectionRunner runner =
-new FsPathConnectionRunner(op, f, new BufferSizeParam(buffersize));
-return new FSDataInputStream(new OffsetUrlInputStream(
-new UnresolvedUrlOpener(runner), new OffsetUrlOpener(null)));
+return new FSDataInputStream(new WebHdfsInputStream(f, bufferSize));
   }
 
   @Override
@@ -1524,4 +1529,346 @@ pub
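The interesting part of this change is the WebHdfsInputStream that now backs open(): reads go through the client's configured retry policy instead of failing on the first error, and the roughly 340 added lines implementing it are truncated above. What follows is only a hedged, self-contained sketch of the general pattern (remember the offset, reopen there, retry a bounded number of times); the ReadOpener interface and the plain maxRetries counter are assumptions for illustration, not the actual WebHDFS API, which consults a RetryPolicy and re-issues the OPEN with an offset parameter.

import java.io.IOException;
import java.io.InputStream;

// Hedged sketch of a retrying read wrapper; not the real WebHdfsInputStream.
public class RetryingInputStream extends InputStream {
  interface ReadOpener {
    InputStream openAt(long offset) throws IOException;   // e.g. an HTTP OPEN at a byte offset
  }

  private final ReadOpener opener;
  private final int maxRetries;      // stand-in for the configured retry policy
  private InputStream in;
  private long pos;

  RetryingInputStream(ReadOpener opener, int maxRetries) throws IOException {
    this.opener = opener;
    this.maxRetries = maxRetries;
    this.in = opener.openAt(0);
  }

  @Override
  public int read() throws IOException {
    for (int attempt = 0; ; attempt++) {
      try {
        int b = in.read();
        if (b >= 0) {
          pos++;                     // remember how far we got so a retry can resume here
        }
        return b;
      } catch (IOException e) {
        if (attempt >= maxRetries) {
          throw e;                   // retries exhausted: surface the failure to the caller
        }
        in.close();
        in = opener.openAt(pos);     // reopen at the last good offset and try again
      }
    }
  }

  @Override
  public void close() throws IOException {
    in.close();
  }
}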

hadoop git commit: HDFS-7163. WebHdfsFileSystem should retry reads according to the configured retry policy. Contributed by Eric Payne. (cherry picked from commit 867048c3e4b20ece0039a876def129fa5eb92

2015-12-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9b039a500 -> 131260f0a


HDFS-7163. WebHdfsFileSystem should retry reads according to the configured 
retry policy. Contributed by Eric Payne.
(cherry picked from commit 867048c3e4b20ece0039a876def129fa5eb9234f)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/131260f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/131260f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/131260f0

Branch: refs/heads/branch-2
Commit: 131260f0a7e8480e03b20d6e6327d8b468a9313d
Parents: 9b039a5
Author: Kihwal Lee 
Authored: Tue Dec 22 14:19:12 2015 -0600
Committer: Kihwal Lee 
Committed: Tue Dec 22 14:19:12 2015 -0600

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 365 ++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |   2 +-
 .../hdfs/server/namenode/TestAuditLogs.java |   2 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 159 
 .../hdfs/web/TestWebHdfsFileSystemContract.java |   2 +-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java  |   6 +-
 7 files changed, 525 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/131260f0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index b528fdb..096ba7b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -18,7 +18,9 @@ * Licensed to the Apache Software Foundation (ASF) under 
one
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -34,8 +36,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
 
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -44,6 +49,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
@@ -554,7 +560,7 @@ public class WebHdfsFileSystem extends FileSystem
  * Also implements two-step connects for other operations redirected to
  * a DN such as open and checksum
  */
-private HttpURLConnection connect(URL url) throws IOException {
+protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
   String redirectHost = null;
 
@@ -707,7 +713,7 @@ public class WebHdfsFileSystem extends FileSystem
*/
   abstract class AbstractFsPathRunner extends AbstractRunner {
 private final Path fspath;
-private final Param[] parameters;
+private Param[] parameters;
 
 AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
 Param... parameters) {
@@ -723,6 +729,10 @@ public class WebHdfsFileSystem extends FileSystem
   this.parameters = parameters;
 }
 
+protected void updateURLParameters(Param... p) {
+  this.parameters = p;
+}
+
 @Override
 protected URL getUrl() throws IOException {
   if (excludeDatanodes.getValue() != null) {
@@ -1232,15 +1242,10 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
-  public FSDataInputStream open(final Path f, final int buffersize
+  public FSDataInputStream open(final Path f, final int bufferSize
   ) throws IOException {
 statistics.incrementReadOps(1);
-final HttpOpParam.Op op = GetOpParam.Op.OPEN;
-// use a runner so the open can recover from an invalid token
-FsPathConnectionRunner runner =
-new FsPathConnectionRunner(op, f, new BufferSizeParam(buffersize));
-return new FSDataInputStream(new OffsetUrlInputStr

hadoop git commit: HDFS-7163. WebHdfsFileSystem should retry reads according to the configured retry policy. Contributed by Eric Payne. (cherry picked from commit 867048c3e4b20ece0039a876def129fa5eb92

2015-12-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8fad4c780 -> 263c54402


HDFS-7163. WebHdfsFileSystem should retry reads according to the configured 
retry policy. Contributed by Eric Payne.
(cherry picked from commit 867048c3e4b20ece0039a876def129fa5eb9234f)
(cherry picked from commit 131260f0a7e8480e03b20d6e6327d8b468a9313d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/263c5440
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/263c5440
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/263c5440

Branch: refs/heads/branch-2.8
Commit: 263c544021d44b6ed4ec311646948610be85d197
Parents: 8fad4c7
Author: Kihwal Lee 
Authored: Tue Dec 22 14:20:10 2015 -0600
Committer: Kihwal Lee 
Committed: Tue Dec 22 14:20:10 2015 -0600

--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 365 ++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |   2 +-
 .../hdfs/server/namenode/TestAuditLogs.java |   2 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 159 
 .../hdfs/web/TestWebHdfsFileSystemContract.java |   2 +-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java  |   6 +-
 7 files changed, 525 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/263c5440/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index b528fdb..096ba7b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -18,7 +18,9 @@ * Licensed to the Apache Software Foundation (ASF) under 
one
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -34,8 +36,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
 
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -44,6 +49,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
@@ -554,7 +560,7 @@ public class WebHdfsFileSystem extends FileSystem
  * Also implements two-step connects for other operations redirected to
  * a DN such as open and checksum
  */
-private HttpURLConnection connect(URL url) throws IOException {
+protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
   String redirectHost = null;
 
@@ -707,7 +713,7 @@ public class WebHdfsFileSystem extends FileSystem
*/
   abstract class AbstractFsPathRunner extends AbstractRunner {
 private final Path fspath;
-private final Param[] parameters;
+private Param[] parameters;
 
 AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
 Param... parameters) {
@@ -723,6 +729,10 @@ public class WebHdfsFileSystem extends FileSystem
   this.parameters = parameters;
 }
 
+protected void updateURLParameters(Param... p) {
+  this.parameters = p;
+}
+
 @Override
 protected URL getUrl() throws IOException {
   if (excludeDatanodes.getValue() != null) {
@@ -1232,15 +1242,10 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
-  public FSDataInputStream open(final Path f, final int buffersize
+  public FSDataInputStream open(final Path f, final int bufferSize
   ) throws IOException {
 statistics.incrementReadOps(1);
-final HttpOpParam.Op op = GetOpParam.Op.OPEN;
-// use a runner so the open can recover from an invalid token
-FsPathConnectionRunner runner =
-new FsPathConnectionRunner(op, f, new BufferSizeParam(buffersize));
-return new FSDataInputStream(new OffsetUrlInputStream(
-new UnresolvedUrlOpener(run

hadoop git commit: HDFS-7163. WebHdfsFileSystem should retry reads according to the configured retry policy. Contributed by Eric Payne.

2015-12-28 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e0e5a0fa8 -> cdf9f1c66


HDFS-7163. WebHdfsFileSystem should retry reads according to the configured 
retry policy. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdf9f1c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdf9f1c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdf9f1c6

Branch: refs/heads/branch-2.7
Commit: cdf9f1c662fd0216d729e15404407cc6ac3072ac
Parents: e0e5a0f
Author: Kihwal Lee 
Authored: Mon Dec 28 10:57:31 2015 -0600
Committer: Kihwal Lee 
Committed: Mon Dec 28 10:57:31 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 365 ++-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |   2 +-
 .../hdfs/server/namenode/TestAuditLogs.java |   2 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 160 
 .../hdfs/web/TestWebHdfsFileSystemContract.java |   2 +-
 .../hadoop/hdfs/web/TestWebHdfsTokens.java  |   6 +-
 7 files changed, 526 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdf9f1c6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 92e0ef0..4ad1cdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -11,6 +11,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-8647. Abstract BlockManager's rack policy into BlockPlacementPolicy.
 (Brahma Reddy Battula via mingma)
 
+HDFS-7163. WebHdfsFileSystem should retry reads according to the configured
+retry policy. (Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdf9f1c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index eea133b..a7bf6a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -18,7 +18,9 @@ * Licensed to the Apache Software Foundation (ASF) under 
one
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
@@ -34,8 +36,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
 
+import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.MediaType;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -45,6 +50,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
@@ -516,7 +522,7 @@ public class WebHdfsFileSystem extends FileSystem
  * Also implements two-step connects for other operations redirected to
  * a DN such as open and checksum
  */
-private HttpURLConnection connect(URL url) throws IOException {
+protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
   String redirectHost = null;
 
@@ -669,7 +675,7 @@ public class WebHdfsFileSystem extends FileSystem
*/
   abstract class AbstractFsPathRunner extends AbstractRunner {
 private final Path fspath;
-private final Param[] parameters;
+private Param[] parameters;
 
 AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
 Param... parameters) {
@@ -684,6 +690,10 @@ public class WebHdfsFileSystem extends FileSystem
   this.fspath = fspath;
   this.parameters = parameters;
 }
+
+protected void updateURLParameters(Param... p) {
+  this.parameters = p;
+}
 
 @Override
 protected URL getUrl() throws IOException {
@@ -1178,15 +1188,10 @@ public class WebHdfsFileSystem extends Fi

hadoop git commit: HDFS-9574. Reduce client failures during datanode restart. Contributed by Kihwal Lee.

2016-01-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed18527e3 -> 38c4c1447


HDFS-9574. Reduce client failures during datanode restart. Contributed by 
Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38c4c144
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38c4c144
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38c4c144

Branch: refs/heads/trunk
Commit: 38c4c14472996562eb3d610649246770c2888c6b
Parents: ed18527
Author: Kihwal Lee 
Authored: Fri Jan 8 11:13:25 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 8 11:13:58 2016 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  59 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../hadoop/hdfs/server/datanode/DNConf.java |  12 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  11 +-
 .../server/datanode/DataNodeFaultInjector.java  |   2 +
 .../hdfs/server/datanode/DataXceiver.java   | 109 +++
 .../src/main/resources/hdfs-default.xml |  10 ++
 .../TestDataXceiverLazyPersistHint.java |   6 +-
 .../fsdataset/impl/TestDatanodeRestart.java |  72 
 10 files changed, 224 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c4c144/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6823c1f..b6b11ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -72,10 +73,12 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.htrace.core.SpanId;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
@@ -357,12 +360,18 @@ public class DFSInputStream extends FSInputStream
 int replicaNotFoundCount = locatedblock.getLocations().length;
 
 final DfsClientConf conf = dfsClient.getConf();
-for(DatanodeInfo datanode : locatedblock.getLocations()) {
+final int timeout = conf.getSocketTimeout();
+LinkedList<DatanodeInfo> nodeList = new LinkedList<DatanodeInfo>(
+Arrays.asList(locatedblock.getLocations()));
+LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
+boolean isRetry = false;
+StopWatch sw = new StopWatch();
+while (nodeList.size() > 0) {
+  DatanodeInfo datanode = nodeList.pop();
   ClientDatanodeProtocol cdp = null;
-
   try {
 cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
-dfsClient.getConfiguration(), conf.getSocketTimeout(),
+dfsClient.getConfiguration(), timeout,
 conf.isConnectToDnViaHostname(), locatedblock);
 
 final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
@@ -370,15 +379,19 @@ public class DFSInputStream extends FSInputStream
 if (n >= 0) {
   return n;
 }
-  }
-  catch(IOException ioe) {
-if (ioe instanceof RemoteException &&
-(((RemoteException) ioe).unwrapRemoteException() instanceof
-ReplicaNotFoundException)) {
-  // special case : replica might not be on the DN, treat as 0 length
-  replicaNotFoundCount--;
+  } catch (IOException ioe) {
+if (ioe instanceof RemoteException) {
+  if (((RemoteException) ioe).unwrapRemoteException() instanceof
+  ReplicaNotFoundException) {
+// replica is not on the DN. We will treat it as 0 length
+// if no one actually has a replica.
+replicaNotFoundCount--;
+  } else if (((RemoteException) ioe).unwrapRemoteException() instanceof
+  RetriableException) {
+// add to the list to be retried if necessary.
+retry
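The tail of this hunk is truncated, but the shape of the change is visible: a DataNode that answers with a RetriableException (typically because it is restarting and its block pool is not yet registered) is parked on a retry list instead of being given up on, the client keeps polling with a short delay, and it only stops once a stopwatch says the retry budget is spent. Below is a hedged, self-contained sketch of that loop; Probe and RetriableNode are illustrative stand-ins for ClientDatanodeProtocol and RetriableException, and the numbers are only examples.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

// Hedged sketch of the retry-until-timeout pattern; not the DFSInputStream code.
public class ReplicaLengthProbe {
  interface Probe {
    long visibleLength(String datanode) throws Exception;   // stand-in for the DN RPC
  }

  static class RetriableNode extends Exception {}            // stand-in for RetriableException

  static long probe(List<String> datanodes, Probe probe, long timeoutMs)
      throws InterruptedException {
    Deque<String> nodes = new ArrayDeque<>(datanodes);
    Deque<String> retry = new ArrayDeque<>();
    long start = 0;
    boolean timerStarted = false;
    while (!nodes.isEmpty()) {
      String dn = nodes.pop();
      try {
        long n = probe.visibleLength(dn);
        if (n >= 0) {
          return n;                                          // first DN that answers wins
        }
      } catch (RetriableNode e) {
        retry.add(dn);                                       // DN is restarting; ask again later
      } catch (Exception e) {
        // any other failure: drop this DN and move on
      }
      if (nodes.isEmpty() && !retry.isEmpty()) {
        if (!timerStarted) {
          start = System.nanoTime();                         // start the clock on the first retry pass
          timerStarted = true;
        }
        if ((System.nanoTime() - start) / 1_000_000 > timeoutMs) {
          break;                                             // retry budget spent; give up
        }
        Thread.sleep(500);                                   // delay between retry passes
        nodes.addAll(retry);
        retry.clear();
      }
    }
    return -1;                                               // nobody could report a length
  }
}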

hadoop git commit: HDFS-9574. Reduce client failures during datanode restart. Contributed by Kihwal Lee. (cherry picked from commit 38c4c14472996562eb3d610649246770c2888c6b)

2016-01-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 20bb5c403 -> 8d04b7c27


HDFS-9574. Reduce client failures during datanode restart. Contributed by 
Kihwal Lee.
(cherry picked from commit 38c4c14472996562eb3d610649246770c2888c6b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d04b7c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d04b7c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d04b7c2

Branch: refs/heads/branch-2
Commit: 8d04b7c272c1c1ecc536a14569ce22022b5a05e1
Parents: 20bb5c4
Author: Kihwal Lee 
Authored: Fri Jan 8 11:15:12 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 8 11:15:12 2016 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  59 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../hadoop/hdfs/server/datanode/DNConf.java |  12 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  11 +-
 .../server/datanode/DataNodeFaultInjector.java  |   2 +
 .../hdfs/server/datanode/DataXceiver.java   | 109 +++
 .../src/main/resources/hdfs-default.xml |  10 ++
 .../TestDataXceiverLazyPersistHint.java |   6 +-
 .../fsdataset/impl/TestDatanodeRestart.java |  72 
 10 files changed, 224 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d04b7c2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 92a33eb..f4dad12 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -73,10 +74,12 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.htrace.core.SpanId;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
@@ -358,12 +361,18 @@ public class DFSInputStream extends FSInputStream
 int replicaNotFoundCount = locatedblock.getLocations().length;
 
 final DfsClientConf conf = dfsClient.getConf();
-for(DatanodeInfo datanode : locatedblock.getLocations()) {
+final int timeout = conf.getSocketTimeout();
+LinkedList<DatanodeInfo> nodeList = new LinkedList<DatanodeInfo>(
+Arrays.asList(locatedblock.getLocations()));
+LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
+boolean isRetry = false;
+StopWatch sw = new StopWatch();
+while (nodeList.size() > 0) {
+  DatanodeInfo datanode = nodeList.pop();
   ClientDatanodeProtocol cdp = null;
-
   try {
 cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
-dfsClient.getConfiguration(), conf.getSocketTimeout(),
+dfsClient.getConfiguration(), timeout,
 conf.isConnectToDnViaHostname(), locatedblock);
 
 final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
@@ -371,15 +380,19 @@ public class DFSInputStream extends FSInputStream
 if (n >= 0) {
   return n;
 }
-  }
-  catch(IOException ioe) {
-if (ioe instanceof RemoteException &&
-(((RemoteException) ioe).unwrapRemoteException() instanceof
-ReplicaNotFoundException)) {
-  // special case : replica might not be on the DN, treat as 0 length
-  replicaNotFoundCount--;
+  } catch (IOException ioe) {
+if (ioe instanceof RemoteException) {
+  if (((RemoteException) ioe).unwrapRemoteException() instanceof
+  ReplicaNotFoundException) {
+// replica is not on the DN. We will treat it as 0 length
+// if no one actually has a replica.
+replicaNotFoundCount--;
+  } else if (((RemoteException) ioe).unwrapRemoteException() instanceof
+  RetriableException) {
+

hadoop git commit: HDFS-9574. Reduce client failures during datanode restart. Contributed by Kihwal Lee. (cherry picked from commit 38c4c14472996562eb3d610649246770c2888c6b)

2016-01-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 852033ca6 -> d1cbc8442


HDFS-9574. Reduce client failures during datanode restart. Contributed by 
Kihwal Lee.
(cherry picked from commit 38c4c14472996562eb3d610649246770c2888c6b)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1cbc844
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1cbc844
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1cbc844

Branch: refs/heads/branch-2.8
Commit: d1cbc8442c9677a5d8622e97f7f8ec7257d5dba7
Parents: 852033c
Author: Kihwal Lee 
Authored: Fri Jan 8 11:16:40 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 8 11:16:40 2016 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  59 --
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../hadoop/hdfs/server/datanode/DNConf.java |  12 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  11 +-
 .../server/datanode/DataNodeFaultInjector.java  |   2 +
 .../hdfs/server/datanode/DataXceiver.java   | 109 +++
 .../src/main/resources/hdfs-default.xml |  10 ++
 .../TestDataXceiverLazyPersistHint.java |   6 +-
 .../fsdataset/impl/TestDatanodeRestart.java |  72 
 10 files changed, 224 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1cbc844/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 92a33eb..f4dad12 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -73,10 +74,12 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.htrace.core.SpanId;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
@@ -358,12 +361,18 @@ public class DFSInputStream extends FSInputStream
 int replicaNotFoundCount = locatedblock.getLocations().length;
 
 final DfsClientConf conf = dfsClient.getConf();
-for(DatanodeInfo datanode : locatedblock.getLocations()) {
+final int timeout = conf.getSocketTimeout();
+LinkedList<DatanodeInfo> nodeList = new LinkedList<DatanodeInfo>(
+Arrays.asList(locatedblock.getLocations()));
+LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
+boolean isRetry = false;
+StopWatch sw = new StopWatch();
+while (nodeList.size() > 0) {
+  DatanodeInfo datanode = nodeList.pop();
   ClientDatanodeProtocol cdp = null;
-
   try {
 cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
-dfsClient.getConfiguration(), conf.getSocketTimeout(),
+dfsClient.getConfiguration(), timeout,
 conf.isConnectToDnViaHostname(), locatedblock);
 
 final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
@@ -371,15 +380,19 @@ public class DFSInputStream extends FSInputStream
 if (n >= 0) {
   return n;
 }
-  }
-  catch(IOException ioe) {
-if (ioe instanceof RemoteException &&
-(((RemoteException) ioe).unwrapRemoteException() instanceof
-ReplicaNotFoundException)) {
-  // special case : replica might not be on the DN, treat as 0 length
-  replicaNotFoundCount--;
+  } catch (IOException ioe) {
+if (ioe instanceof RemoteException) {
+  if (((RemoteException) ioe).unwrapRemoteException() instanceof
+  ReplicaNotFoundException) {
+// replica is not on the DN. We will treat it as 0 length
+// if no one actually has a replica.
+replicaNotFoundCount--;
+  } else if (((Rem

hadoop git commit: HDFS-9574. Reduce client failures during datanode restart. Contributed by Kihwal Lee.

2016-01-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 5dc5bb281 -> b06e39de4


HDFS-9574. Reduce client failures during datanode restart. Contributed by 
Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b06e39de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b06e39de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b06e39de

Branch: refs/heads/branch-2.7
Commit: b06e39de4fc4f9c35afb472eef0bba2adf91954f
Parents: 5dc5bb2
Author: Kihwal Lee 
Authored: Fri Jan 8 11:31:49 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 8 11:31:49 2016 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  60 +++--
 .../hadoop/hdfs/server/datanode/DNConf.java |  12 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  11 +-
 .../server/datanode/DataNodeFaultInjector.java  |   2 +
 .../hdfs/server/datanode/DataXceiver.java   | 122 +++
 .../src/main/resources/hdfs-default.xml |  10 ++
 .../fsdataset/impl/TestDatanodeRestart.java |  73 +++
 9 files changed, 231 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b06e39de/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2313f13..33d9550 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -14,6 +14,8 @@ Release 2.7.3 - UNRELEASED
 HDFS-7163. WebHdfsFileSystem should retry reads according to the configured
 retry policy. (Eric Payne via kihwal)
 
+HDFS-9574. Reduce client failures during datanode restart (kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b06e39de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3bb8a0a..c2a5296 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -492,6 +492,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY 
= "dfs.datanode.min.supported.namenode.version";
   public static final String  
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "2.1.0-beta";
   public static final String  DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = 
"dfs.namenode.inode.attributes.provider.class";
+  public static final String  DFS_DATANODE_BP_READY_TIMEOUT_KEY = 
"dfs.datanode.bp-ready.timeout";
+  public static final long    DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT = 20;
 
   public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = 
"dfs.block.access.token.enable";
   public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
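The DFSConfigKeys hunk above introduces dfs.datanode.bp-ready.timeout with a default of 20 (per the hdfs-default.xml change in this commit, presumably seconds): roughly, how long a freshly restarted DataNode lets a client request wait for the block pool to become ready before failing it with a RetriableException. A minimal, hedged example of reading the key through the stock Configuration API; the class name and the exact way DNConf consumes the value are assumptions.

import org.apache.hadoop.conf.Configuration;

public class BpReadyTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key and default copied from the hunk above; units assumed to be seconds.
    long bpReadyTimeoutSec = conf.getLong("dfs.datanode.bp-ready.timeout", 20L);
    System.out.println("wait up to " + bpReadyTimeoutSec
        + "s for the block pool before throwing RetriableException");
  }
}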

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b06e39de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 9f7b15c..19dde1f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -71,10 +72,12 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.htrace.Span;
 import org.apac

hadoop git commit: HDFS-9574. Reduce client failures during datanode restart. Contributed by Kihwal Lee. (cherry picked from commit b06e39de4fc4f9c35afb472eef0bba2adf91954f)

2016-01-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 9cb288e9f -> 04b8a19f8


HDFS-9574. Reduce client failures during datanode restart. Contributed by 
Kihwal Lee.
(cherry picked from commit b06e39de4fc4f9c35afb472eef0bba2adf91954f)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04b8a19f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04b8a19f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04b8a19f

Branch: refs/heads/branch-2.6
Commit: 04b8a19f81ee616c315eec639642439b3a18ad9c
Parents: 9cb288e
Author: Kihwal Lee 
Authored: Fri Jan 8 12:26:05 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 8 12:26:05 2016 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  60 --
 .../hadoop/hdfs/server/datanode/DNConf.java |  12 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  11 +-
 .../server/datanode/DataNodeFaultInjector.java  |   2 +
 .../hdfs/server/datanode/DataXceiver.java   | 120 +++
 .../src/main/resources/hdfs-default.xml |  10 ++
 .../fsdataset/impl/TestDatanodeRestart.java |  73 +++
 9 files changed, 230 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04b8a19f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f760d36..3ddcfab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -20,6 +20,8 @@ Release 2.6.4 - UNRELEASED
 HDFS-6945. BlockManager should remove a block from excessReplicateMap and
 decrement ExcessBlocks metric when the block is removed. (aajisaka)
 
+HDFS-9574. Reduce client failures during datanode restart (kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04b8a19f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 85b740e..3f26105 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -470,6 +470,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_IPC_DEFAULT_PORT;
   public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY 
= "dfs.datanode.min.supported.namenode.version";
   public static final String  
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "2.1.0-beta";
+  public static final String  DFS_DATANODE_BP_READY_TIMEOUT_KEY = 
"dfs.datanode.bp-ready.timeout";
+  public static final long    DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT = 20;
 
   public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = 
"dfs.block.access.token.enable";
   public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04b8a19f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index db06d3b..506b485 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import 

hadoop git commit: Supplement to HDFS-9574.

2016-01-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 04b8a19f8 -> b91715bc8


Supplement to HDFS-9574.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b91715bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b91715bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b91715bc

Branch: refs/heads/branch-2.6
Commit: b91715bc837ac0cf7e607c202bce7b3637f56f4c
Parents: 04b8a19
Author: Kihwal Lee 
Authored: Fri Jan 8 13:10:09 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 8 13:10:09 2016 -0600

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java   | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b91715bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 506b485..a9bbb77 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -323,6 +323,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 Arrays.asList(locatedblock.getLocations()));
 LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
 boolean isRetry = false;
+boolean timerStarted = false;
 long startTime = 0;
 while (nodeList.size() > 0) {
   DatanodeInfo datanode = nodeList.pop();
@@ -370,8 +371,9 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 
   if (isRetry) {
 // start tracking the time
-if (startTime == 0) {
+if (!timerStarted) {
   startTime = Time.monotonicNow();
+  timerStarted = true;
 }
 try {
   Thread.sleep(500); // delay between retries.
@@ -381,7 +383,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
   // see if we ran out of retry time
-  if (startTime > 0 && (Time.monotonicNow() - startTime > timeout)) {
+  if (timerStarted && (Time.monotonicNow() - startTime > timeout)) {
 break;
   }
 }



[2/2] hadoop git commit: HDFS-9047. Retire libwebhdfs.

2016-01-13 Thread kihwal
HDFS-9047. Retire libwebhdfs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83a47912
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83a47912
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83a47912

Branch: refs/heads/branch-2.7
Commit: 83a479124317c3913729330373a9ab82698106e2
Parents: 4c09a75
Author: Kihwal Lee 
Authored: Wed Jan 13 10:59:45 2016 -0600
Committer: Kihwal Lee 
Committed: Wed Jan 13 11:01:35 2016 -0600

--
 BUILDING.txt|1 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |2 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |5 +-
 .../hadoop-hdfs/src/CMakeLists.txt  |3 -
 .../src/contrib/libwebhdfs/CMakeLists.txt   |   75 -
 .../libwebhdfs/resources/FindJansson.cmake  |   42 -
 .../contrib/libwebhdfs/src/hdfs_http_client.c   |  482 --
 .../contrib/libwebhdfs/src/hdfs_http_client.h   |  294 
 .../contrib/libwebhdfs/src/hdfs_http_query.c|  402 -
 .../contrib/libwebhdfs/src/hdfs_http_query.h|  240 ---
 .../contrib/libwebhdfs/src/hdfs_json_parser.c   |  654 
 .../contrib/libwebhdfs/src/hdfs_json_parser.h   |  178 --
 .../src/contrib/libwebhdfs/src/hdfs_web.c   | 1538 --
 .../libwebhdfs/src/test_libwebhdfs_ops.c|  552 ---
 .../libwebhdfs/src/test_libwebhdfs_read.c   |   78 -
 .../libwebhdfs/src/test_libwebhdfs_threaded.c   |  247 ---
 .../libwebhdfs/src/test_libwebhdfs_write.c  |  111 --
 17 files changed, 4 insertions(+), 4900 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a47912/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index b30b30e..e130250 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -11,7 +11,6 @@ Requirements:
 * CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
 * Zlib devel (if compiling native code)
 * openssl devel ( if compiling native hadoop-pipes and to get the best HDFS 
encryption performance )
-* Jansson C XML parsing library ( if compiling libwebhdfs )
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above ( if compiling 
fuse_dfs )
 * Internet connection for first build (to fetch all Maven and Hadoop 
dependencies)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a47912/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 710abf3..f6adb1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -16,6 +16,8 @@ Release 2.7.3 - UNRELEASED
 
 HDFS-9574. Reduce client failures during datanode restart (kihwal)
 
+HDFS-9047. Retire libwebhdfs (kihwal)
+
 HDFS-9569. Log the name of the fsimage being loaded for better
 supportability. (Yongjun Zhang)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a47912/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 2701a77..fb1f315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -34,7 +34,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 hdfs
 true
 false
-false
   
 
   
@@ -449,7 +448,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
 
-  
+  
 
 
@@ -519,7 +518,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
 
 
-  
+  
 
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a47912/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt 
b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index 563727b..de885db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -250,7 +250,4 @@ if (NOT WIN32)
 endif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
 endif(NOT WIN32)
 
-IF(REQUIRE_LIBWEBHDFS)
-add_subdirectory(contrib/libwebhdfs)
-ENDIF(REQUIRE_LIBWEBHDFS)
 add_subdirectory(main/native/fuse-dfs)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a47912/hadoop-hdfs-project/hadoop-hdfs/src

[1/2] hadoop git commit: HDFS-9047. Retire libwebhdfs.

2016-01-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4c09a75f3 -> 83a479124


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83a47912/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
deleted file mode 100644
index c5f2f9c..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libwebhdfs/src/hdfs_json_parser.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef _HDFS_JSON_PARSER_H_
-#define _HDFS_JSON_PARSER_H_
-
-/**
- * Parse the response for MKDIR request. The response uses TRUE/FALSE 
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseMKDIR(const char *response);
-
-/**
- * Parse the response for RENAME request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseRENAME(const char *response);
-
-/**
- * Parse the response for DELETE request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseDELETE(const char *response);
-
-/**
- * Parse the response for SETREPLICATION request. The response uses TRUE/FALSE
- * to indicate whether the operation succeeded.
- *
- * @param response  The response information to parse.
- * @return 0 for success
- */
-int parseSETREPLICATION(const char *response);
-
-/**
- * Parse the response for OPEN (read) request. A successful operation 
- * will return "200 OK".
- *
- * @param response  The response information for parsing
- * @return  0 for success , -1 for out of range, other values for error
- */
-int parseOPEN(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from NameNode) request. 
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param headerThe header of the http response
- * @param content   If failing, the exception message 
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for WRITE (from DataNode) request. 
- * A successful operation should return "201 Created" in its header.
- * 
- * @param headerThe header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnWRITE(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (sent from NameNode) request.
- * A successful operation should return "307 TEMPORARY_REDIRECT" in its header.
- *
- * @param headerThe header of the http response
- * @param content   If failing, the exception message
- *  sent from NameNode is stored in content
- * @return  0 for success
- */
-int parseNnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response for APPEND (from DataNode) request.
- * A successful operation should return "200 OK" in its header.
- *
- * @param headerThe header of the http response
- * @param content   If failing, the exception message
- *  sent from DataNode is stored in content
- * @return  0 for success
- */
-int parseDnAPPEND(const char *header, const char *content);
-
-/**
- * Parse the response (from NameNode) to get the location information 
- * of the DataNode that should be contacted for the following write operation.
- *
- * @param content   Content of the http header
- * @param dnTo store the location of the DataNode for writing
- * @return  0 for success
- */
-int parseDnLoc(char *content, char **dn) __attribute__ ((warn_unused_result));
-
-/**
- * Parse the response for GETFILESTATUS operation
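The header being deleted above documents how the C client interpreted WebHDFS responses: TRUE/FALSE bodies for MKDIR/RENAME/DELETE/SETREPLICATION, "200 OK" for OPEN, "307 TEMPORARY_REDIRECT" for the NameNode side of WRITE/APPEND, and "201 Created" for the DataNode side of a write. With libwebhdfs retired, the same operations remain available through the Java WebHDFS client (WebHdfsFileSystem, selected by the webhdfs:// scheme). A minimal sketch follows; the host name, port and paths are illustrative assumptions, not part of this commit:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed endpoint; 50070 is the default NameNode HTTP port on 2.x.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), conf);

    Path dir = new Path("/tmp/webhdfs-demo");
    System.out.println("mkdirs: " + fs.mkdirs(dir));   // MKDIRS, a boolean like parseMKDIR

    Path file = new Path(dir, "hello.txt");
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.writeUTF("hello over webhdfs");              // CREATE follows the 307 redirect internally
    }

    System.out.println("delete: " + fs.delete(dir, true));  // DELETE, a boolean like parseDELETE
    fs.close();
  }
}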

hadoop git commit: Supplement to HDFS-9047.

2016-01-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk c0537bcd2 -> c722b6290


Supplement to HDFS-9047.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c722b629
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c722b629
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c722b629

Branch: refs/heads/trunk
Commit: c722b62908984f8fb6ab2e0bfd40c090e8c830c7
Parents: c0537bc
Author: Kihwal Lee 
Authored: Wed Jan 13 11:07:52 2016 -0600
Committer: Kihwal Lee 
Committed: Wed Jan 13 11:07:52 2016 -0600

--
 BUILDING.txt | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c722b629/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 91281be..4399ff0 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -11,7 +11,6 @@ Requirements:
 * CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
 * Zlib devel (if compiling native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS 
encryption performance)
-* Jansson C XML parsing library (if compiling libwebhdfs)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling 
fuse_dfs)
 * Internet connection for first build (to fetch all Maven and Hadoop 
dependencies)
 * python (for releasedocs)



hadoop git commit: Supplement to HDFS-9047. (cherry picked from commit c722b62908984f8fb6ab2e0bfd40c090e8c830c7)

2016-01-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c0b97865e -> 2ea99ee35


Supplement to HDFS-9047.
(cherry picked from commit c722b62908984f8fb6ab2e0bfd40c090e8c830c7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ea99ee3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ea99ee3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ea99ee3

Branch: refs/heads/branch-2
Commit: 2ea99ee35fa0aae23ddbb8b6c8bca9ccb62d4343
Parents: c0b9786
Author: Kihwal Lee 
Authored: Wed Jan 13 11:09:17 2016 -0600
Committer: Kihwal Lee 
Committed: Wed Jan 13 11:09:17 2016 -0600

--
 BUILDING.txt | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ea99ee3/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 1492a10..d8cd210 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -11,7 +11,6 @@ Requirements:
 * CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
 * Zlib devel (if compiling native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS 
encryption performance)
-* Jansson C XML parsing library (if compiling libwebhdfs)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling 
fuse_dfs)
 * Internet connection for first build (to fetch all Maven and Hadoop 
dependencies)
 



hadoop git commit: Supplement to HDFS-9047. (cherry picked from commit c722b62908984f8fb6ab2e0bfd40c090e8c830c7)

2016-01-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a22a7f61e -> 8df0db3d1


Supplement to HDFS-9047.
(cherry picked from commit c722b62908984f8fb6ab2e0bfd40c090e8c830c7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8df0db3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8df0db3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8df0db3d

Branch: refs/heads/branch-2.8
Commit: 8df0db3d189e1bdf293b2ee86c6ea4e615e2492c
Parents: a22a7f6
Author: Kihwal Lee 
Authored: Wed Jan 13 11:09:50 2016 -0600
Committer: Kihwal Lee 
Committed: Wed Jan 13 11:09:50 2016 -0600

--
 BUILDING.txt | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8df0db3d/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 1492a10..d8cd210 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -11,7 +11,6 @@ Requirements:
 * CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
 * Zlib devel (if compiling native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS 
encryption performance)
-* Jansson C XML parsing library (if compiling libwebhdfs)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling 
fuse_dfs)
 * Internet connection for first build (to fetch all Maven and Hadoop 
dependencies)
 



hadoop git commit: HDFS-9574. Reduce client failures during datanode restart. Contributed by Kihwal Lee. (cherry picked from commit b06e39de4fc4f9c35afb472eef0bba2adf91954f)

2016-01-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7.2 109c7545d -> 82ec5dbb2


HDFS-9574. Reduce client failures during datanode restart. Contributed by 
Kihwal Lee.
(cherry picked from commit b06e39de4fc4f9c35afb472eef0bba2adf91954f)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82ec5dbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82ec5dbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82ec5dbb

Branch: refs/heads/branch-2.7.2
Commit: 82ec5dbb2505066da8a6ed008d943b5ada027b15
Parents: 109c754
Author: Kihwal Lee 
Authored: Thu Jan 14 09:55:48 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Jan 14 09:55:48 2016 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  60 +++--
 .../hadoop/hdfs/server/datanode/DNConf.java |  12 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  11 +-
 .../server/datanode/DataNodeFaultInjector.java  |   2 +
 .../hdfs/server/datanode/DataXceiver.java   | 122 +++
 .../src/main/resources/hdfs-default.xml |  10 ++
 .../fsdataset/impl/TestDatanodeRestart.java |  73 +++
 9 files changed, 231 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ec5dbb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d92cb5e..eadac75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -16,6 +16,8 @@ Release 2.7.2 - 2015-12-16
 HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
 values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+HDFS-9574. Reduce client failures during datanode restart (kihwal)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ec5dbb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3bb8a0a..c2a5296 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -492,6 +492,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY 
= "dfs.datanode.min.supported.namenode.version";
   public static final String  
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "2.1.0-beta";
   public static final String  DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = 
"dfs.namenode.inode.attributes.provider.class";
+  public static final String  DFS_DATANODE_BP_READY_TIMEOUT_KEY = 
"dfs.datanode.bp-ready.timeout";
+  public static final long    DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT = 20;
 
   public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = 
"dfs.block.access.token.enable";
   public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
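The hunk above adds dfs.datanode.bp-ready.timeout, which, judging from the key name and the DataXceiver changes in the diffstat, bounds how long a freshly restarted DataNode waits for its block pool service to become ready before failing client requests. A minimal sketch of overriding it in a Configuration; the value 30 is purely illustrative (the patch sets the default to 20):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class BpReadyTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Raise the wait from the default of 20 to 30 before a restarting
    // datanode starts rejecting client operations.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_BP_READY_TIMEOUT_KEY, 30);
    System.out.println(conf.getLong(
        DFSConfigKeys.DFS_DATANODE_BP_READY_TIMEOUT_KEY,
        DFSConfigKeys.DFS_DATANODE_BP_READY_TIMEOUT_DEFAULT));
  }
}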

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ec5dbb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 9f7b15c..19dde1f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -71,10 +72,12 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hado

hadoop git commit: Fix up CHANGES.txt

2016-01-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c2d1d6952 -> a90dcb086


Fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a90dcb08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a90dcb08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a90dcb08

Branch: refs/heads/branch-2.7
Commit: a90dcb0865d28afdf7fd2b265bf54eef9fe7ca2b
Parents: c2d1d69
Author: Kihwal Lee 
Authored: Thu Jan 14 09:58:35 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Jan 14 09:58:35 2016 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a90dcb08/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a32354d..9d9d7ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -14,8 +14,6 @@ Release 2.7.3 - UNRELEASED
 HDFS-7163. WebHdfsFileSystem should retry reads according to the configured
 retry policy. (Eric Payne via kihwal)
 
-HDFS-9574. Reduce client failures during datanode restart (kihwal)
-
 HDFS-9047. Retire libwebhdfs (kihwal)
 
 HDFS-9569. Log the name of the fsimage being loaded for better
@@ -71,6 +69,8 @@ Release 2.7.2 - UNRELEASED
 HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
 values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+HDFS-9574. Reduce client failures during datanode restart (kihwal)
+
   OPTIMIZATIONS
 
 HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)



hadoop git commit: HDFS-9634. webhdfs client side exceptions don't provide enough details. Contributed by Eric Payne.

2016-01-21 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 446987e20 -> 3616c7b85


HDFS-9634. webhdfs client side exceptions don't provide enough details. 
Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3616c7b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3616c7b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3616c7b8

Branch: refs/heads/trunk
Commit: 3616c7b855962014750a3259a64c6e2a147da884
Parents: 446987e
Author: Kihwal Lee 
Authored: Wed Jan 20 13:18:25 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Jan 21 09:32:57 2016 -0600

--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 17 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 14 +++---
 3 files changed, 26 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3616c7b8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 1fa95c2..f9c2c6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -24,6 +24,7 @@ import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.lang.reflect.InvocationTargetException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
@@ -502,6 +503,7 @@ public class WebHdfsFileSystem extends FileSystem
 new ExcludeDatanodesParam("");
 
 private boolean checkRetry;
+private String redirectHost;
 
 protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) {
   this.op = op;
@@ -553,7 +555,7 @@ public class WebHdfsFileSystem extends FileSystem
  */
 protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
-  String redirectHost = null;
+  redirectHost = null;
 
 
   // resolve redirects for a DN operation unless already resolved
@@ -658,6 +660,19 @@ public class WebHdfsFileSystem extends FileSystem
 throw it;
   }
 } catch (IOException ioe) {
+  // Attempt to include the redirected node in the exception. If the
+  // attempt to recreate the exception fails, just use the original.
+  String node = redirectHost;
+  if (node == null) {
+    node = url.getAuthority();
+  }
+  try {
+    ioe = ioe.getClass().getConstructor(String.class)
+        .newInstance(node + ": " + ioe.getMessage());
+  } catch (NoSuchMethodException | SecurityException
+           | InstantiationException | IllegalAccessException
+           | IllegalArgumentException | InvocationTargetException e) {
+  }
   shouldRetry(ioe, retry);
 }
   }
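The technique in the catch block above is generic: rebuild an exception of the same runtime class via its (String) constructor with the remote node prepended to the message, and silently keep the original exception if that fails. A standalone sketch of the same idea; the helper name and node string are illustrative, not part of the patch:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.ConnectException;

public final class ExceptionRelabel {
  // Illustrative helper: return an exception of the same class whose message
  // is prefixed with the node that produced it, falling back to the original
  // when the class lacks a usable (String) constructor.
  static IOException withNode(String node, IOException ioe) {
    try {
      return ioe.getClass().getConstructor(String.class)
          .newInstance(node + ": " + ioe.getMessage());
    } catch (NoSuchMethodException | SecurityException
             | InstantiationException | IllegalAccessException
             | IllegalArgumentException | InvocationTargetException e) {
      return ioe;
    }
  }

  public static void main(String[] args) {
    IOException original = new ConnectException("Connection refused");
    System.out.println(withNode("dn1.example.com:50075", original).getMessage());
    // prints: dn1.example.com:50075: Connection refused
  }
}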

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3616c7b8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f1e6bdc..f41537e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2624,6 +2624,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9569. Log the name of the fsimage being loaded for better
 supportability. (Yongjun Zhang)
 
+HDFS-9634. webhdfs client side exceptions don't provide enough details
+(Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3616c7b8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index bd4d693..3a87d42 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -115,7 +115,7 @@

hadoop git commit: HDFS-9634. webhdfs client side exceptions don't provide enough details. Contributed by Eric Payne. (cherry picked from commit 7b70500484574a565dd8cd5c7d8b5bc7c6d91154)

2016-01-21 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c13929b38 -> 3d8d8f149


HDFS-9634. webhdfs client side exceptions don't provide enough details. 
Contributed by Eric Payne.
(cherry picked from commit 7b70500484574a565dd8cd5c7d8b5bc7c6d91154)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d8d8f14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d8d8f14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d8d8f14

Branch: refs/heads/branch-2
Commit: 3d8d8f1497df0ef02f23077d67a41cd5853ef8fa
Parents: c13929b
Author: Kihwal Lee 
Authored: Wed Jan 20 13:19:29 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Jan 21 09:33:30 2016 -0600

--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 17 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 14 +++---
 3 files changed, 26 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d8d8f14/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 096ba7b..cc22040 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -24,6 +24,7 @@ import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.lang.reflect.InvocationTargetException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
@@ -511,6 +512,7 @@ public class WebHdfsFileSystem extends FileSystem
 new ExcludeDatanodesParam("");
 
 private boolean checkRetry;
+private String redirectHost;
 
 protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) {
   this.op = op;
@@ -562,7 +564,7 @@ public class WebHdfsFileSystem extends FileSystem
  */
 protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
-  String redirectHost = null;
+  redirectHost = null;
 
 
   // resolve redirects for a DN operation unless already resolved
@@ -667,6 +669,19 @@ public class WebHdfsFileSystem extends FileSystem
 throw it;
   }
 } catch (IOException ioe) {
+  // Attempt to include the redirected node in the exception. If the
+  // attempt to recreate the exception fails, just use the original.
+  String node = redirectHost;
+  if (node == null) {
+    node = url.getAuthority();
+  }
+  try {
+    ioe = ioe.getClass().getConstructor(String.class)
+        .newInstance(node + ": " + ioe.getMessage());
+  } catch (NoSuchMethodException | SecurityException
+           | InstantiationException | IllegalAccessException
+           | IllegalArgumentException | InvocationTargetException e) {
+  }
   shouldRetry(ioe, retry);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d8d8f14/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b3351e9..d426c16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1741,6 +1741,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9569. Log the name of the fsimage being loaded for better
 supportability. (Yongjun Zhang)
 
+HDFS-9634. webhdfs client side exceptions don't provide enough details
+(Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d8d8f14/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index bd4d693..3a87d42 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/tes

hadoop git commit: HDFS-9634. webhdfs client side exceptions don't provide enough details. Contributed by Eric Payne. (cherry picked from commit 7b70500484574a565dd8cd5c7d8b5bc7c6d91154)

2016-01-21 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 ca2993340 -> e160ddfb8


HDFS-9634. webhdfs client side exceptions don't provide enough details. 
Contributed by Eric Payne.
(cherry picked from commit 7b70500484574a565dd8cd5c7d8b5bc7c6d91154)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e160ddfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e160ddfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e160ddfb

Branch: refs/heads/branch-2.7
Commit: e160ddfb8d7ef3acbe22cde850b48671719f8338
Parents: ca29933
Author: Kihwal Lee 
Authored: Wed Jan 20 15:33:59 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Jan 21 09:34:29 2016 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 17 -
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 14 +++---
 3 files changed, 26 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e160ddfb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 67e13a5..69bb220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -22,6 +22,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9415. Document dfs.cluster.administrators and
 dfs.permissions.superusergroup. (Xiaobing Zhou via Arpit Agarwal)
 
+HDFS-9634. webhdfs client side exceptions don't provide enough details
+(Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e160ddfb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index a7bf6a9..b738050 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -24,6 +24,7 @@ import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.lang.reflect.InvocationTargetException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
@@ -474,6 +475,7 @@ public class WebHdfsFileSystem extends FileSystem
 protected ExcludeDatanodesParam excludeDatanodes = new 
ExcludeDatanodesParam("");
 
 private boolean checkRetry;
+private String redirectHost;
 
 protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) {
   this.op = op;
@@ -524,7 +526,7 @@ public class WebHdfsFileSystem extends FileSystem
  */
 protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
-  String redirectHost = null;
+  redirectHost = null;
 
   
   // resolve redirects for a DN operation unless already resolved
@@ -630,6 +632,19 @@ public class WebHdfsFileSystem extends FileSystem
 throw it;
   }
 } catch (IOException ioe) {
+  // Attempt to include the redirected node in the exception. If the
+  // attempt to recreate the exception fails, just use the original.
+  String node = redirectHost;
+  if (node == null) {
+    node = url.getAuthority();
+  }
+  try {
+    ioe = ioe.getClass().getConstructor(String.class)
+        .newInstance(node + ": " + ioe.getMessage());
+  } catch (NoSuchMethodException | SecurityException
+           | InstantiationException | IllegalAccessException
+           | IllegalArgumentException | InvocationTargetException e) {
+  }
   shouldRetry(ioe, retry);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e160ddfb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 13a5a53..5419093 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/tes

hadoop git commit: HDFS-9634. webhdfs client side exceptions don't provide enough details. Contributed by Eric Payne. (cherry picked from commit 7b70500484574a565dd8cd5c7d8b5bc7c6d91154)

2016-01-21 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a02fab85e -> 67ba20e36


HDFS-9634. webhdfs client side exceptions don't provide enough details. 
Contributed by Eric Payne.
(cherry picked from commit 7b70500484574a565dd8cd5c7d8b5bc7c6d91154)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67ba20e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67ba20e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67ba20e3

Branch: refs/heads/branch-2.8
Commit: 67ba20e364f761b8d23fea8916f2f02572e14fa5
Parents: a02fab8
Author: Kihwal Lee 
Authored: Wed Jan 20 13:19:44 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Jan 21 09:34:06 2016 -0600

--
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 17 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  3 +++
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 14 +++---
 3 files changed, 26 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67ba20e3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 096ba7b..cc22040 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -24,6 +24,7 @@ import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.lang.reflect.InvocationTargetException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
@@ -511,6 +512,7 @@ public class WebHdfsFileSystem extends FileSystem
 new ExcludeDatanodesParam("");
 
 private boolean checkRetry;
+private String redirectHost;
 
 protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) {
   this.op = op;
@@ -562,7 +564,7 @@ public class WebHdfsFileSystem extends FileSystem
  */
 protected HttpURLConnection connect(URL url) throws IOException {
   //redirect hostname and port
-  String redirectHost = null;
+  redirectHost = null;
 
 
   // resolve redirects for a DN operation unless already resolved
@@ -667,6 +669,19 @@ public class WebHdfsFileSystem extends FileSystem
 throw it;
   }
 } catch (IOException ioe) {
+  // Attempt to include the redirected node in the exception. If the
+  // attempt to recreate the exception fails, just use the original.
+  String node = redirectHost;
+  if (node == null) {
+    node = url.getAuthority();
+  }
+  try {
+    ioe = ioe.getClass().getConstructor(String.class)
+        .newInstance(node + ": " + ioe.getMessage());
+  } catch (NoSuchMethodException | SecurityException
+           | InstantiationException | IllegalAccessException
+           | IllegalArgumentException | InvocationTargetException e) {
+  }
   shouldRetry(ioe, retry);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67ba20e3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 39dff21..9f29da3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1685,6 +1685,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9569. Log the name of the fsimage being loaded for better
 supportability. (Yongjun Zhang)
 
+HDFS-9634. webhdfs client side exceptions don't provide enough details
+(Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67ba20e3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index bd4d693..3a87d42 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/tes

[2/2] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma.

2016-01-22 Thread kihwal
HDFS-8898. Create API and command-line argument to get quota and quota usage 
without detailed content summary. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fd19b96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fd19b96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fd19b96

Branch: refs/heads/trunk
Commit: 2fd19b9674420e025af54a5bed12eb96478f8c48
Parents: d6258b3
Author: Kihwal Lee 
Authored: Thu Jan 21 12:04:14 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 22 09:10:06 2016 -0600

--
 .../org/apache/hadoop/fs/ContentSummary.java| 241 -
 .../java/org/apache/hadoop/fs/FileSystem.java   |   7 +
 .../java/org/apache/hadoop/fs/QuotaUsage.java   | 359 +++
 .../java/org/apache/hadoop/fs/shell/Count.java  |  37 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|   7 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  15 +-
 .../src/site/markdown/FileSystemShell.md|  11 +-
 .../org/apache/hadoop/cli/CLITestHelper.java|   2 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   3 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   1 +
 .../org/apache/hadoop/fs/TestQuotaUsage.java| 146 
 .../org/apache/hadoop/fs/shell/TestCount.java   | 109 +-
 .../src/test/resources/testConf.xml |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  27 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  19 +
 .../hadoop/hdfs/protocol/ClientProtocol.java|  14 +
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  79 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  10 +
 .../src/main/proto/hdfs.proto   |  11 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +
 .../server/namenode/FSDirStatAndListingOp.java  |  53 +++
 .../hdfs/server/namenode/FSNamesystem.java  |  31 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../fs/viewfs/TestViewFsDefaultValue.java   |  53 ++-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  66 +++-
 .../server/namenode/ha/TestQuotasWithHA.java|  14 +-
 28 files changed, 1125 insertions(+), 232 deletions(-)
--
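The diffstat above shows where the feature lands: a new QuotaUsage class in hadoop-common, wiring through FileSystem, DistributedFileSystem, ClientProtocol and the namenode, plus shell changes in Count.java and FileSystemShell.md for the command-line side. The point of the change is to read quota and consumption counters without paying for a full recursive content summary. A sketch of how a caller might compare the two, assuming the new accessor surfaces as FileSystem#getQuotaUsage (the exact method name and signature are not visible in this excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;

public class QuotaUsageExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/user/example");   // illustrative path

    // Heavyweight: walks the whole subtree to count files, dirs and bytes.
    ContentSummary cs = fs.getContentSummary(dir);
    System.out.println("files+dirs = " + (cs.getFileCount() + cs.getDirectoryCount()));

    // Lightweight: returns only the quota counters kept on the directory.
    QuotaUsage qu = fs.getQuotaUsage(dir);
    System.out.println("nsQuota=" + qu.getQuota()
        + " nsConsumed=" + qu.getFileAndDirectoryCount()
        + " ssQuota=" + qu.getSpaceQuota()
        + " ssConsumed=" + qu.getSpaceConsumed());
  }
}

On the shell side, the touched Count.java and FileSystemShell.md suggest the equivalent is a new option to hadoop fs -count; the updated FileSystemShell.md in the patch documents the flag.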


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd19b96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 678ce7f..3dedbcc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -30,26 +30,15 @@ import org.apache.hadoop.util.StringUtils;
 /** Store the summary of a content (a directory or a file). */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class ContentSummary implements Writable{
+public class ContentSummary extends QuotaUsage implements Writable{
   private long length;
   private long fileCount;
   private long directoryCount;
-  private long quota;
-  private long spaceConsumed;
-  private long spaceQuota;
-  private long typeConsumed[];
-  private long typeQuota[];
 
-  public static class Builder{
+  /** We don't use generics. Instead override spaceConsumed and other methods
+  in order to keep backward compatibility. */
+  public static class Builder extends QuotaUsage.Builder {
 public Builder() {
-  this.quota = -1;
-  this.spaceQuota = -1;
-
-  typeConsumed = new long[StorageType.values().length];
-  typeQuota = new long[StorageType.values().length];
-  for (int i = 0; i < typeQuota.length; i++) {
-typeQuota[i] = -1;
-  }
 }
 
 public Builder length(long length) {
@@ -67,58 +56,57 @@ public class ContentSummary implements Writable{
   return this;
 }
 
+@Override
 public Builder quota(long quota){
-  this.quota = quota;
+  super.quota(quota);
   return this;
 }
 
+@Override
 public Builder spaceConsumed(long spaceConsumed) {
-  this.spaceConsumed = spaceConsumed;
+  super.spaceConsumed(spaceConsumed);
   return this;
 }
 
+@Override
 public Builder spaceQuota(long spaceQuota) {
-  this.spaceQuota = spaceQuota;
+  super.spaceQuota(spaceQuota);
   return this;
 }
 
+@Override
 public Builder typeConsumed(long typeConsumed[]) {
-  for (int i = 0; i < typeConsumed.length; i++) {
-this.typeConsumed[i] = typ
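The comment the patch adds to Builder explains the design: ContentSummary.Builder now extends QuotaUsage.Builder, and each quota-related setter is overridden only so that chained calls keep returning the subclass type and existing callers keep compiling. A small sketch of the preserved call pattern; the values are illustrative:

import org.apache.hadoop.fs.ContentSummary;

public class ContentSummaryBuilderExample {
  public static void main(String[] args) {
    // Chaining still works because every overridden setter returns
    // ContentSummary.Builder rather than the parent QuotaUsage.Builder.
    ContentSummary summary = new ContentSummary.Builder()
        .length(1024L)        // bytes under the directory (illustrative)
        .fileCount(3L)
        .directoryCount(2L)
        .quota(10L)           // namespace quota
        .spaceConsumed(3072L)
        .spaceQuota(-1L)      // -1 means "no quota", matching the Builder defaults
        .build();
    System.out.println(summary.toString(true));
  }
}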

[1/2] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma.

2016-01-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk d6258b33a -> 2fd19b967


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd19b96/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 961ae0e..9798787 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -134,6 +135,7 @@ public class TestQuota {
   
   // 4: count -q /test
   ContentSummary c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 3);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
@@ -141,10 +143,12 @@ public class TestQuota {
   
   // 5: count -q /test/data0
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), -1);
   // check disk space consumed
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
 
   // 6: create a directory /test/data1
@@ -172,12 +176,14 @@ public class TestQuota {
   // 8: clear quota /test
   runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), spaceQuota);
   
   // 9: clear quota /test/data0
   runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, 
false);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getQuota(), -1);
   
   // 10: create a file /test/datafile1
@@ -198,6 +204,7 @@ public class TestQuota {
   // 9.s: clear diskspace quota
   runCommand(admin, false, "-clrSpaceQuota", parent.toString());
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), -1);   
   
@@ -224,6 +231,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
   assertEquals(c.getQuota(), 1);
   
@@ -362,7 +370,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
 
-  assertEquals(4, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
+  assertEquals(5, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
 } finally {
   cluster.shutdown();
 }
@@ -387,6 +395,7 @@ public class TestQuota {
   final Path quotaDir1 = new Path("/nqdir0/qdir1");
   dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
   ContentSummary c = dfs.getContentSummary(quotaDir1);
+  compareQuotaUsage(c, dfs, quotaDir1);
   assertEquals(c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 6);
 
@@ -394,6 +403,7 @@ public class TestQuota {
   final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
   dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir2);
+  compareQuotaUsage(c, dfs, quotaDir2);
   assertEquals(c.getDirectoryCount(), 2);
   assertEquals(c.getQuota(), 7);
 
@@ -402,6 +412,7 @@ public class TestQuota {
   assertTrue(dfs.mkdirs(quotaDir3));
   dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir3);
+  compareQuotaUsage(c, dfs, quotaDir3);
   assertEquals(c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), 2);
 
@@ -409,6 +420,7 @@ public class TestQuota {
   Path tempPath = new Path(quotaDir3, "nqdir32");
   assertTrue(dfs.mkdirs(tempPath));
   c = dfs.getContentSummary(quotaDir3);
+  compareQuotaUsage(c, dfs, quotaDir3);
   assertEquals(c.getDirectoryCount(), 2);
   assertEquals(c.getQuota(), 2);
 
@@ -422,6 +434,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
   c = dfs.getContentSummary(quotaDir3);
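Each assertion block in the test now also calls compareQuotaUsage(c, dfs, path). The helper's body does not appear in this excerpt; purely as an assumption about its intent, a cross-check like the following would fit, since ContentSummary extends QuotaUsage after this patch and both views of the same directory should agree (the real helper in TestQuota may differ):

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;

final class QuotaUsageCheck {
  // Hypothetical re-creation of the elided test helper.
  static void compareQuotaUsage(ContentSummary summary, FileSystem fs, Path path)
      throws Exception {
    QuotaUsage usage = fs.getQuotaUsage(path);
    assertEquals(summary.getQuota(), usage.getQuota());
    assertEquals(summary.getSpaceQuota(), usage.getSpaceQuota());
    assertEquals(summary.getSpaceConsumed(), usage.getSpaceConsumed());
    assertEquals(summary.getFileCount() + summary.getDirectoryCount(),
        usage.getFileAndDirectoryCount());
  }
}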

[2/2] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma. (cherry picked from commit b5db1d44104a8

2016-01-22 Thread kihwal
HDFS-8898. Create API and command-line argument to get quota and quota usage 
without detailed content summary. Contributed by Ming Ma.
(cherry picked from commit b5db1d44104a8ac4c3643cc3ac841f75f31c8345)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33087668
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33087668
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33087668

Branch: refs/heads/branch-2.8
Commit: 330876689b54910990643b34e98e7f65126db5c7
Parents: 2097016
Author: Kihwal Lee 
Authored: Fri Jan 22 09:20:39 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 22 09:20:39 2016 -0600

--
 .../org/apache/hadoop/fs/ContentSummary.java| 241 -
 .../java/org/apache/hadoop/fs/FileSystem.java   |   7 +
 .../java/org/apache/hadoop/fs/QuotaUsage.java   | 359 +++
 .../java/org/apache/hadoop/fs/shell/Count.java  |  37 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|   7 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  15 +-
 .../src/site/markdown/FileSystemShell.md|  13 +-
 .../org/apache/hadoop/cli/CLITestHelper.java|   2 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   6 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   1 +
 .../org/apache/hadoop/fs/TestQuotaUsage.java| 146 
 .../org/apache/hadoop/fs/shell/TestCount.java   | 109 +-
 .../src/test/resources/testConf.xml |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  28 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  19 +
 .../hadoop/hdfs/protocol/ClientProtocol.java|  14 +
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  79 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  10 +
 .../src/main/proto/hdfs.proto   |  11 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +
 .../server/namenode/FSDirStatAndListingOp.java  |  53 +++
 .../hdfs/server/namenode/FSNamesystem.java  |  31 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  63 +++-
 .../server/namenode/ha/TestQuotasWithHA.java|  14 +-
 27 files changed, 1072 insertions(+), 235 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33087668/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index ccd6960..3dedbcc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -30,26 +30,15 @@ import org.apache.hadoop.util.StringUtils;
 /** Store the summary of a content (a directory or a file). */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class ContentSummary implements Writable{
+public class ContentSummary extends QuotaUsage implements Writable{
   private long length;
   private long fileCount;
   private long directoryCount;
-  private long quota;
-  private long spaceConsumed;
-  private long spaceQuota;
-  private long typeConsumed[];
-  private long typeQuota[];
 
-  public static class Builder{
+  /** We don't use generics. Instead override spaceConsumed and other methods
+  in order to keep backward compatibility. */
+  public static class Builder extends QuotaUsage.Builder {
 public Builder() {
-  this.quota = -1;
-  this.spaceQuota = -1;
-
-  typeConsumed = new long[StorageType.values().length];
-  typeQuota = new long[StorageType.values().length];
-  for (int i = 0; i < typeQuota.length; i++) {
-typeQuota[i] = -1;
-  }
 }
 
 public Builder length(long length) {
@@ -67,58 +56,57 @@ public class ContentSummary implements Writable{
   return this;
 }
 
+@Override
 public Builder quota(long quota){
-  this.quota = quota;
+  super.quota(quota);
   return this;
 }
 
+@Override
 public Builder spaceConsumed(long spaceConsumed) {
-  this.spaceConsumed = spaceConsumed;
+  super.spaceConsumed(spaceConsumed);
   return this;
 }
 
+@Override
 public Builder spaceQuota(long spaceQuota) {
-  this.spaceQuota = spaceQuota;
+  super.spaceQuota(spaceQuota);
   return this;
 }
 
+@Override
 public Builder typeConsumed(long typeConsumed[]) {
-  for (int i = 0; i < typeConsumed.length; i++) {
-this.typeConsumed[i] = typ

[1/2] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma.

2016-01-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 493275b27 -> b5db1d441


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5db1d44/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index d09b8b5..aee3c46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -21,6 +21,7 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -33,6 +34,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -133,6 +135,7 @@ public class TestQuota {
   
   // 4: count -q /test
   ContentSummary c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 3);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
@@ -140,10 +143,12 @@ public class TestQuota {
   
   // 5: count -q /test/data0
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), -1);
   // check disk space consumed
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
 
   // 6: create a directory /test/data1
@@ -171,12 +176,14 @@ public class TestQuota {
   // 8: clear quota /test
   runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), spaceQuota);
   
   // 9: clear quota /test/data0
   runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, 
false);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getQuota(), -1);
   
   // 10: create a file /test/datafile1
@@ -197,6 +204,7 @@ public class TestQuota {
   // 9.s: clear diskspace quota
   runCommand(admin, false, "-clrSpaceQuota", parent.toString());
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), -1);   
   
@@ -223,6 +231,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
   assertEquals(c.getQuota(), 1);
   
@@ -361,7 +370,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
 
-  assertEquals(4, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
+  assertEquals(5, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
 } finally {
   cluster.shutdown();
 }
@@ -386,6 +395,7 @@ public class TestQuota {
   final Path quotaDir1 = new Path("/nqdir0/qdir1");
   dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
   ContentSummary c = dfs.getContentSummary(quotaDir1);
+  compareQuotaUsage(c, dfs, quotaDir1);
   assertEquals(c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 6);
 
@@ -393,6 +403,7 @@ public class TestQuota {
   final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
   dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir2);
+  compareQuotaUsage(c, dfs, quotaDir2);
   assertEquals(c.getDirectoryCount(), 2);
   assertEquals(c.getQuota(), 7);
 
@@ -401,6 +412,7 @@ public class TestQuota {
   assertTrue(dfs.mkdirs(quotaDir3));
   dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir3);
+  compareQuotaUsage(c, dfs, quotaDir3);
   assertEquals(c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), 2);
 
@@ -408,6 +420,7 @@ public class TestQuota {
   Path tempPath = new Path(quotaDir3, "nqdir32")

[2/2] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma.

2016-01-22 Thread kihwal
HDFS-8898. Create API and command-line argument to get quota and quota usage 
without detailed content summary. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5db1d44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5db1d44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5db1d44

Branch: refs/heads/branch-2
Commit: b5db1d44104a8ac4c3643cc3ac841f75f31c8345
Parents: 493275b
Author: Kihwal Lee 
Authored: Fri Jan 22 09:19:23 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 22 09:19:23 2016 -0600

--
 .../org/apache/hadoop/fs/ContentSummary.java| 241 -
 .../java/org/apache/hadoop/fs/FileSystem.java   |   7 +
 .../java/org/apache/hadoop/fs/QuotaUsage.java   | 359 +++
 .../java/org/apache/hadoop/fs/shell/Count.java  |  37 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|   7 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  15 +-
 .../src/site/markdown/FileSystemShell.md|  13 +-
 .../org/apache/hadoop/cli/CLITestHelper.java|   2 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   6 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   1 +
 .../org/apache/hadoop/fs/TestQuotaUsage.java| 146 
 .../org/apache/hadoop/fs/shell/TestCount.java   | 109 +-
 .../src/test/resources/testConf.xml |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  28 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  19 +
 .../hadoop/hdfs/protocol/ClientProtocol.java|  14 +
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  79 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  10 +
 .../src/main/proto/hdfs.proto   |  11 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +
 .../server/namenode/FSDirStatAndListingOp.java  |  53 +++
 .../hdfs/server/namenode/FSNamesystem.java  |  31 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  63 +++-
 .../server/namenode/ha/TestQuotasWithHA.java|  14 +-
 27 files changed, 1072 insertions(+), 235 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5db1d44/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index ccd6960..3dedbcc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -30,26 +30,15 @@ import org.apache.hadoop.util.StringUtils;
 /** Store the summary of a content (a directory or a file). */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class ContentSummary implements Writable{
+public class ContentSummary extends QuotaUsage implements Writable{
   private long length;
   private long fileCount;
   private long directoryCount;
-  private long quota;
-  private long spaceConsumed;
-  private long spaceQuota;
-  private long typeConsumed[];
-  private long typeQuota[];
 
-  public static class Builder{
+  /** We don't use generics. Instead override spaceConsumed and other methods
+  in order to keep backward compatibility. */
+  public static class Builder extends QuotaUsage.Builder {
 public Builder() {
-  this.quota = -1;
-  this.spaceQuota = -1;
-
-  typeConsumed = new long[StorageType.values().length];
-  typeQuota = new long[StorageType.values().length];
-  for (int i = 0; i < typeQuota.length; i++) {
-typeQuota[i] = -1;
-  }
 }
 
 public Builder length(long length) {
@@ -67,58 +56,57 @@ public class ContentSummary implements Writable{
   return this;
 }
 
+@Override
 public Builder quota(long quota){
-  this.quota = quota;
+  super.quota(quota);
   return this;
 }
 
+@Override
 public Builder spaceConsumed(long spaceConsumed) {
-  this.spaceConsumed = spaceConsumed;
+  super.spaceConsumed(spaceConsumed);
   return this;
 }
 
+@Override
 public Builder spaceQuota(long spaceQuota) {
-  this.spaceQuota = spaceQuota;
+  super.spaceQuota(spaceQuota);
   return this;
 }
 
+@Override
 public Builder typeConsumed(long typeConsumed[]) {
-  for (int i = 0; i < typeConsumed.length; i++) {
-this.typeConsumed[i] = typeConsumed[i];
-  }
+  super.typeConsumed(typeConsumed);
   

[1/2] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma. (cherry picked from commit b5db1d44104a8

2016-01-22 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2097016ab -> 330876689


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33087668/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index d09b8b5..aee3c46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -21,6 +21,7 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -33,6 +34,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -133,6 +135,7 @@ public class TestQuota {
   
   // 4: count -q /test
   ContentSummary c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 3);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
@@ -140,10 +143,12 @@ public class TestQuota {
   
   // 5: count -q /test/data0
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), -1);
   // check disk space consumed
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
 
   // 6: create a directory /test/data1
@@ -171,12 +176,14 @@ public class TestQuota {
   // 8: clear quota /test
   runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), spaceQuota);
   
   // 9: clear quota /test/data0
   runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, 
false);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getQuota(), -1);
   
   // 10: create a file /test/datafile1
@@ -197,6 +204,7 @@ public class TestQuota {
   // 9.s: clear diskspace quota
   runCommand(admin, false, "-clrSpaceQuota", parent.toString());
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), -1);   
   
@@ -223,6 +231,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
   assertEquals(c.getQuota(), 1);
   
@@ -361,7 +370,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
 
-  assertEquals(4, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
+  assertEquals(5, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
 } finally {
   cluster.shutdown();
 }
@@ -386,6 +395,7 @@ public class TestQuota {
   final Path quotaDir1 = new Path("/nqdir0/qdir1");
   dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
   ContentSummary c = dfs.getContentSummary(quotaDir1);
+  compareQuotaUsage(c, dfs, quotaDir1);
   assertEquals(c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 6);
 
@@ -393,6 +403,7 @@ public class TestQuota {
   final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
   dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir2);
+  compareQuotaUsage(c, dfs, quotaDir2);
   assertEquals(c.getDirectoryCount(), 2);
   assertEquals(c.getQuota(), 7);
 
@@ -401,6 +412,7 @@ public class TestQuota {
   assertTrue(dfs.mkdirs(quotaDir3));
   dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir3);
+  compareQuotaUsage(c, dfs, quotaDir3);
   assertEquals(c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), 2);
 
@@ -408,6 +420,7 @@ public class TestQuota {
   Path tempPath = new Path(quotaDir3, "nqdir32

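Note on the test hunks above: they add compareQuotaUsage(...) after every getContentSummary call, but the helper itself lies outside this excerpt. A hedged sketch of what such a helper could look like, assuming a getQuotaUsage call on the filesystem (the quota-only API this change introduces) and the standard QuotaUsage accessors; the real test's assertions may differ:

private static void compareQuotaUsage(ContentSummary summary, FileSystem fs,
    Path path) throws IOException {
  // The quota-only call should agree with the full content summary for the
  // same path; getQuotaUsage and the exact assertions are assumptions here.
  QuotaUsage usage = fs.getQuotaUsage(path);
  assertEquals(summary.getQuota(), usage.getQuota());
  assertEquals(summary.getSpaceQuota(), usage.getSpaceQuota());
  assertEquals(summary.getSpaceConsumed(), usage.getSpaceConsumed());
  assertEquals(summary.getFileCount() + summary.getDirectoryCount(),
      usage.getFileAndDirectoryCount());
}

The imports it relies on (ContentSummary, QuotaUsage, FileSystem, Path, assertEquals) are the ones added or already present in the hunk above.
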
hadoop git commit: HDFS-8722. Optimize datanode writes for small writes and flushes. Contributed by Kihwal Lee

2015-07-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk b7fb6ec45 -> 59388a801


HDFS-8722. Optimize datanode writes for small writes and flushes. Contributed 
by Kihwal Lee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59388a80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59388a80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59388a80

Branch: refs/heads/trunk
Commit: 59388a801514d6af64ef27fbf246d8054f1dcc74
Parents: b7fb6ec
Author: Kihwal Lee 
Authored: Tue Jul 14 14:04:06 2015 -0500
Committer: Kihwal Lee 
Committed: Tue Jul 14 14:04:06 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/datanode/BlockReceiver.java | 34 +---
 2 files changed, 24 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59388a80/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 86b1ea1..14f3403 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1053,6 +1053,8 @@ Release 2.7.2 - UNRELEASED
 
   OPTIMIZATIONS
 
+  HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
+
   BUG FIXES
 
 Release 2.7.1 - 2015-07-06 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59388a80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2468f43..55c9d57 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -598,14 +598,19 @@ class BlockReceiver implements Closeable {
   // bytes should be skipped when writing the data and checksum
   // buffers out to disk.
   long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;
+  long lastChunkBoundary = onDiskLen - partialChunkSizeOnDisk;
   boolean alignedOnDisk = partialChunkSizeOnDisk == 0;
   boolean alignedInPacket = firstByteInBlock % bytesPerChecksum == 0;
 
-  // Since data is always appended, not overwritten, partial CRC
-  // recalculation is necessary if the on-disk data is not chunk-
-  // aligned, regardless of whether the beginning of the data in
-  // the packet is chunk-aligned.
-  boolean doPartialCrc = !alignedOnDisk && !shouldNotWriteChecksum;
+  // If the end of the on-disk data is not chunk-aligned, the last
+  // checksum needs to be overwritten.
+  boolean overwriteLastCrc = !alignedOnDisk && !shouldNotWriteChecksum;
+  // If the starting offset of the packet data is at the last chunk
+  // boundary of the data on disk, the partial checksum recalculation
+  // can be skipped and the checksum supplied by the client can be used
+  // instead. This reduces disk reads and cpu load.
+  boolean doCrcRecalc = overwriteLastCrc &&
+  (lastChunkBoundary != firstByteInBlock);
 
   // If this is a partial chunk, then verify that this is the only
   // chunk in the packet. If the starting offset is not chunk
@@ -621,9 +626,10 @@ class BlockReceiver implements Closeable {
   // If the last portion of the block file is not a full chunk,
   // then read in pre-existing partial data chunk and recalculate
   // the checksum so that the checksum calculation can continue
-  // from the right state.
+  // from the right state. If the client provided the checksum for
+  // the whole chunk, this is not necessary.
   Checksum partialCrc = null;
-  if (doPartialCrc) {
+  if (doCrcRecalc) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("receivePacket for " + block 
   + ": previous write did not end at the chunk boundary."
@@ -659,8 +665,15 @@ class BlockReceiver implements Closeable {
 int skip = 0;
 byte[] crcBytes = null;
 
-// First, overwrite the partial crc at the end, if necessary.
-if (doPartialCrc) { // not chunk-aligned on disk
+// First, prepare to overwrite the partial crc at the end.
+if (o

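Condensed, the new logic above splits the old doPartialCrc flag into two questions: must the last on-disk checksum be rewritten at all, and if so, does that require re-reading the partial chunk from disk to recompute it. A stand-alone sketch of just that decision (variable names follow the hunk; this is not the full BlockReceiver code path):

static boolean[] crcDecision(long onDiskLen, long firstByteInBlock,
    int bytesPerChecksum, boolean shouldNotWriteChecksum) {
  long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;
  long lastChunkBoundary = onDiskLen - partialChunkSizeOnDisk;
  boolean alignedOnDisk = partialChunkSizeOnDisk == 0;

  // Rewrite the last CRC whenever the block does not end on a chunk boundary.
  boolean overwriteLastCrc = !alignedOnDisk && !shouldNotWriteChecksum;
  // Recompute it from disk only when the packet does not start exactly at
  // the last chunk boundary; otherwise the client's checksum covers the chunk.
  boolean doCrcRecalc = overwriteLastCrc
      && (lastChunkBoundary != firstByteInBlock);
  return new boolean[] { overwriteLastCrc, doCrcRecalc };
}

For example, with bytesPerChecksum = 512, onDiskLen = 1000 and a packet that starts at offset 512 (the small-flush append pattern, where the client resends the whole partial chunk), overwriteLastCrc is true but doCrcRecalc is false, so the partial chunk is never read back from disk.
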
hadoop git commit: HDFS-8722. Optimize datanode writes for small writes and flushes. Contributed by Kihwal Lee (cherry picked from commit 59388a801514d6af64ef27fbf246d8054f1dcc74)

2015-07-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 0744a578e -> e26321982


HDFS-8722. Optimize datanode writes for small writes and flushes. Contributed 
by Kihwal Lee
(cherry picked from commit 59388a801514d6af64ef27fbf246d8054f1dcc74)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2632198
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2632198
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2632198

Branch: refs/heads/branch-2.7
Commit: e263219829a1b945fabb17fb0f74be78a41e85d1
Parents: 0744a57
Author: Kihwal Lee 
Authored: Tue Jul 14 14:08:15 2015 -0500
Committer: Kihwal Lee 
Committed: Tue Jul 14 14:08:15 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/datanode/BlockReceiver.java | 34 +---
 2 files changed, 24 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2632198/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 094ec24..5cfb9a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -12,6 +12,8 @@ Release 2.7.2 - UNRELEASED
 
   OPTIMIZATIONS
 
+  HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
+
   BUG FIXES
 
 Release 2.7.1 - 2015-07-06 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2632198/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index bc6c540..8fba2ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -598,14 +598,19 @@ class BlockReceiver implements Closeable {
   // bytes should be skipped when writing the data and checksum
   // buffers out to disk.
   long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;
+  long lastChunkBoundary = onDiskLen - partialChunkSizeOnDisk;
   boolean alignedOnDisk = partialChunkSizeOnDisk == 0;
   boolean alignedInPacket = firstByteInBlock % bytesPerChecksum == 0;
 
-  // Since data is always appended, not overwritten, partial CRC
-  // recalculation is necessary if the on-disk data is not chunk-
-  // aligned, regardless of whether the beginning of the data in
-  // the packet is chunk-aligned.
-  boolean doPartialCrc = !alignedOnDisk && !shouldNotWriteChecksum;
+  // If the end of the on-disk data is not chunk-aligned, the last
+  // checksum needs to be overwritten.
+  boolean overwriteLastCrc = !alignedOnDisk && !shouldNotWriteChecksum;
+  // If the starting offset of the packet data is at the last chunk
+  // boundary of the data on disk, the partial checksum recalculation
+  // can be skipped and the checksum supplied by the client can be used
+  // instead. This reduces disk reads and cpu load.
+  boolean doCrcRecalc = overwriteLastCrc &&
+  (lastChunkBoundary != firstByteInBlock);
 
   // If this is a partial chunk, then verify that this is the only
   // chunk in the packet. If the starting offset is not chunk
@@ -621,9 +626,10 @@ class BlockReceiver implements Closeable {
   // If the last portion of the block file is not a full chunk,
   // then read in pre-existing partial data chunk and recalculate
   // the checksum so that the checksum calculation can continue
-  // from the right state.
+  // from the right state. If the client provided the checksum for
+  // the whole chunk, this is not necessary.
   Checksum partialCrc = null;
-  if (doPartialCrc) {
+  if (doCrcRecalc) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("receivePacket for " + block 
   + ": previous write did not end at the chunk boundary."
@@ -659,8 +665,15 @@ class BlockReceiver implements Closeable {
 int skip = 0;
 byte[] crcBytes = null;
 
-// First, overwrite the partial crc at the end, if necessary.
-if (doPartialCrc) { // not chunk-aligned on disk
+// Fir

hadoop git commit: HDFS-8722. Optimize datanode writes for small writes and flushes. Contributed by Kihwal Lee (cherry picked from commit 59388a801514d6af64ef27fbf246d8054f1dcc74)

2015-07-14 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 471037883 -> b0a2dc9c8


HDFS-8722. Optimize datanode writes for small writes and flushes. Contributed 
by Kihwal Lee
(cherry picked from commit 59388a801514d6af64ef27fbf246d8054f1dcc74)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0a2dc9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0a2dc9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0a2dc9c

Branch: refs/heads/branch-2
Commit: b0a2dc9c8471337182162746d9298f08b39a5566
Parents: 4710378
Author: Kihwal Lee 
Authored: Tue Jul 14 14:07:38 2015 -0500
Committer: Kihwal Lee 
Committed: Tue Jul 14 14:07:38 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/datanode/BlockReceiver.java | 34 +---
 2 files changed, 24 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0a2dc9c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 434f487..403ed06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -717,6 +717,8 @@ Release 2.7.2 - UNRELEASED
 
   OPTIMIZATIONS
 
+  HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
+
   BUG FIXES
 
 Release 2.7.1 - 2015-07-06 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0a2dc9c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2468f43..55c9d57 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -598,14 +598,19 @@ class BlockReceiver implements Closeable {
   // bytes should be skipped when writing the data and checksum
   // buffers out to disk.
   long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;
+  long lastChunkBoundary = onDiskLen - partialChunkSizeOnDisk;
   boolean alignedOnDisk = partialChunkSizeOnDisk == 0;
   boolean alignedInPacket = firstByteInBlock % bytesPerChecksum == 0;
 
-  // Since data is always appended, not overwritten, partial CRC
-  // recalculation is necessary if the on-disk data is not chunk-
-  // aligned, regardless of whether the beginning of the data in
-  // the packet is chunk-aligned.
-  boolean doPartialCrc = !alignedOnDisk && !shouldNotWriteChecksum;
+  // If the end of the on-disk data is not chunk-aligned, the last
+  // checksum needs to be overwritten.
+  boolean overwriteLastCrc = !alignedOnDisk && !shouldNotWriteChecksum;
+  // If the starting offset of the packet data is at the last chunk
+  // boundary of the data on disk, the partial checksum recalculation
+  // can be skipped and the checksum supplied by the client can be used
+  // instead. This reduces disk reads and cpu load.
+  boolean doCrcRecalc = overwriteLastCrc &&
+  (lastChunkBoundary != firstByteInBlock);
 
   // If this is a partial chunk, then verify that this is the only
   // chunk in the packet. If the starting offset is not chunk
@@ -621,9 +626,10 @@ class BlockReceiver implements Closeable {
   // If the last portion of the block file is not a full chunk,
   // then read in pre-existing partial data chunk and recalculate
   // the checksum so that the checksum calculation can continue
-  // from the right state.
+  // from the right state. If the client provided the checksum for
+  // the whole chunk, this is not necessary.
   Checksum partialCrc = null;
-  if (doPartialCrc) {
+  if (doCrcRecalc) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("receivePacket for " + block 
   + ": previous write did not end at the chunk boundary."
@@ -659,8 +665,15 @@ class BlockReceiver implements Closeable {
 int skip = 0;
 byte[] crcBytes = null;
 
-// First, overwrite the partial crc at the end, if necessary.
-if (doPartialCrc) { // not chunk-aligned on disk
+// First, 

hadoop git commit: HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn Sharp.

2015-03-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk c66c3ac6b -> 722b47946


HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/722b4794
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/722b4794
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/722b4794

Branch: refs/heads/trunk
Commit: 722b4794693d8bad1dee0ca5c2f99030a08402f9
Parents: c66c3ac
Author: Kihwal Lee 
Authored: Wed Mar 4 17:21:51 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Mar 4 17:21:51 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 48 
 .../server/protocol/DatanodeRegistration.java   | 10 
 .../blockmanagement/TestBlockManager.java   |  7 ---
 .../TestComputeInvalidateWork.java  | 16 +--
 .../TestDatanodeProtocolRetryPolicy.java|  3 +-
 6 files changed, 43 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/722b4794/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c6d447..2be1a4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1091,6 +1091,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
 (Chris Nauroth via wheat9)
 
+HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/722b4794/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 779e3b9..f91696f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -47,19 +47,23 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private int infoSecurePort; // info server port
   private int ipcPort;   // IPC server port
   private String xferAddr;
-  private int hashCode = -1;
 
   /**
* UUID identifying a given datanode. For upgraded Datanodes this is the
* same as the StorageID that was previously used by this Datanode. 
* For newly formatted Datanodes it is a UUID.
*/
-  private String datanodeUuid = null;
+  private final String datanodeUuid;
 
   public DatanodeID(DatanodeID from) {
+this(from.getDatanodeUuid(), from);
+  }
+
+  @VisibleForTesting
+  public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
 from.getHostName(),
-from.getDatanodeUuid(),
+datanodeUuid,
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -81,19 +85,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-this.ipAddr = ipAddr;
+setIpAndXferPort(ipAddr, xferPort);
 this.hostName = hostName;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
-this.xferPort = xferPort;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
-updateXferAddrAndInvalidateHashCode();
   }
   
   public void setIpAddr(String ipAddr) {
+//updated during registration, preserve former xferPort
+setIpAndXferPort(ipAddr, xferPort);
+  }
+
+  private void setIpAndXferPort(String ipAddr, int xferPort) {
+// build xferAddr string to reduce cost of frequent use
 this.ipAddr = ipAddr;
-updateXferAddrAndInvalidateHashCode();
+this.xferPort = xferPort;
+this.xferAddr = ipAddr + ":" + xferPort;
   }
 
   public void setPeerHostName(String peerHostName) {
@@ -107,12 +116,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
 return datanodeUuid;
   }
 
-  @VisibleForTesting
-  public void setDatanodeUuidForTesting(String datanodeUuid) {
-this.datanodeUuid = datanodeUuid;
-updateXferAddrAndInvalidateHashCode();
-  }
-
   private String checkDatanodeUuid(String uuid) {
 if (uuid == null || uuid.isEmpty()) {
   return nul

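The motivation for dropping the cached, mutable hashCode field above is that DatanodeID instances serve as keys in hash-based collections; if a field that feeds the hash changes after insertion, the entry silently becomes unreachable. A small illustrative sketch of the resulting shape, with an invented class name and deliberately simplified equals/hashCode (the real DatanodeID's are not reproduced here): hashing rests only on an immutable identity field, mutable registration data can still be updated, and the frequently used "ip:port" string is rebuilt eagerly whenever it changes.

final class NodeId {
  private final String uuid;   // identity: immutable, drives equals/hashCode
  private String ipAddr;       // registration data: may be updated later
  private int xferPort;
  private String xferAddr;     // cached "ip:port", rebuilt on every update

  NodeId(String uuid, String ipAddr, int xferPort) {
    this.uuid = uuid;
    setIpAndXferPort(ipAddr, xferPort);
  }

  void setIpAddr(String ipAddr) {
    setIpAndXferPort(ipAddr, xferPort);   // keep the existing port
  }

  private void setIpAndXferPort(String ipAddr, int xferPort) {
    this.ipAddr = ipAddr;
    this.xferPort = xferPort;
    this.xferAddr = ipAddr + ":" + xferPort;
  }

  String getXferAddr() {
    return xferAddr;
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof NodeId && uuid.equals(((NodeId) o).uuid);
  }

  @Override
  public int hashCode() {
    return uuid.hashCode();    // stable for the lifetime of the object
  }
}
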
hadoop git commit: HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn Sharp. (cherry picked from commit 722b4794693d8bad1dee0ca5c2f99030a08402f9)

2015-03-04 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f4d6c5e33 -> f85530f64


HDFS-7434. DatanodeID hashCode should not be mutable. Contributed by Daryn 
Sharp.
(cherry picked from commit 722b4794693d8bad1dee0ca5c2f99030a08402f9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f85530f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f85530f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f85530f6

Branch: refs/heads/branch-2
Commit: f85530f649bd7c16bd7c1d4a3447863563d24c03
Parents: f4d6c5e
Author: Kihwal Lee 
Authored: Wed Mar 4 17:23:00 2015 -0600
Committer: Kihwal Lee 
Committed: Wed Mar 4 17:23:00 2015 -0600

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 48 
 .../server/protocol/DatanodeRegistration.java   | 10 
 .../blockmanagement/TestBlockManager.java   |  7 ---
 .../TestComputeInvalidateWork.java  | 16 +--
 .../TestDatanodeProtocolRetryPolicy.java|  3 +-
 6 files changed, 43 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85530f6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42f7c8c..6d2ec99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -785,6 +785,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7879. hdfs.dll does not export functions of the public libhdfs API.
 (Chris Nauroth via wheat9)
 
+HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f85530f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 779e3b9..f91696f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -47,19 +47,23 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private int infoSecurePort; // info server port
   private int ipcPort;   // IPC server port
   private String xferAddr;
-  private int hashCode = -1;
 
   /**
* UUID identifying a given datanode. For upgraded Datanodes this is the
* same as the StorageID that was previously used by this Datanode. 
* For newly formatted Datanodes it is a UUID.
*/
-  private String datanodeUuid = null;
+  private final String datanodeUuid;
 
   public DatanodeID(DatanodeID from) {
+this(from.getDatanodeUuid(), from);
+  }
+
+  @VisibleForTesting
+  public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
 from.getHostName(),
-from.getDatanodeUuid(),
+datanodeUuid,
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -81,19 +85,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-this.ipAddr = ipAddr;
+setIpAndXferPort(ipAddr, xferPort);
 this.hostName = hostName;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
-this.xferPort = xferPort;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
-updateXferAddrAndInvalidateHashCode();
   }
   
   public void setIpAddr(String ipAddr) {
+//updated during registration, preserve former xferPort
+setIpAndXferPort(ipAddr, xferPort);
+  }
+
+  private void setIpAndXferPort(String ipAddr, int xferPort) {
+// build xferAddr string to reduce cost of frequent use
 this.ipAddr = ipAddr;
-updateXferAddrAndInvalidateHashCode();
+this.xferPort = xferPort;
+this.xferAddr = ipAddr + ":" + xferPort;
   }
 
   public void setPeerHostName(String peerHostName) {
@@ -107,12 +116,6 @@ public class DatanodeID implements Comparable<DatanodeID> {
 return datanodeUuid;
   }
 
-  @VisibleForTesting
-  public void setDatanodeUuidForTesting(String datanodeUuid) {
-this.datanodeUuid = datanodeUuid;
-updateXferAddrAndInvalidateHashCode();
-  }
-
   private String checkDatanodeUui

hadoop git commit: HDFS-7435. PB encoding of block reports is very inefficient. Contributed by Daryn Sharp.

2015-03-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk f446669af -> d324164a5


HDFS-7435. PB encoding of block reports is very inefficient. Contributed by 
Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d324164a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d324164a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d324164a

Branch: refs/heads/trunk
Commit: d324164a51a43d72c02567248bd9f0f12b244a40
Parents: f446669
Author: Kihwal Lee 
Authored: Fri Mar 13 14:13:55 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 13 14:23:37 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  | 660 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  22 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |  14 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +-
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../hdfs/server/datanode/BPServiceActor.java|  13 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  20 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/protocol/DatanodeRegistration.java   |   9 +
 .../hdfs/server/protocol/NamespaceInfo.java |  52 ++
 .../server/protocol/StorageBlockReport.java |   8 +-
 .../src/main/proto/DatanodeProtocol.proto   |   2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../hdfs/protocol/TestBlockListAsLongs.java | 237 +++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../server/datanode/BlockReportTestBase.java|  27 +-
 .../server/datanode/SimulatedFSDataset.java |  11 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   9 +-
 .../datanode/TestDataNodeVolumeFailure.java |   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |  23 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   3 +-
 .../hdfs/server/namenode/TestFSImage.java   |   2 +
 .../TestOfflineEditsViewer.java |   9 +-
 26 files changed, 811 insertions(+), 354 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d324164a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 909182b..ac7e096 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -743,6 +743,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7491. Add incremental blockreport latency to DN metrics.
 (Ming Ma via cnauroth)
 
+HDFS-7435. PB encoding of block reports is very inefficient.
+(Daryn Sharp via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d324164a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 4389714..1c89ee4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -17,342 +17,458 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Random;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 
-/**
- * This class provides an interface for accessing list of blocks that
- * has been implemented as long[].
- * This class is useful for block report. Rather than send block reports
- * as a Block[] we can send it as a long[].
- *
- * The structure of the array is as follows:
- * 0: the length of the finalized replica list;
- * 1: the len

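The javadoc being replaced above documents the pre-HDFS-7435 representation: a block report shipped as one flat long[] rather than an array of Block objects. A simplified illustration of that idea, three longs per block and nothing else (the real layout also carries header words and a separate under-construction section):

final class FlatBlockList {
  // Each input entry is {blockId, numBytes, generationStamp}.
  static long[] encode(long[][] blocks) {
    long[] packed = new long[blocks.length * 3];
    int i = 0;
    for (long[] b : blocks) {
      packed[i++] = b[0];
      packed[i++] = b[1];
      packed[i++] = b[2];
    }
    return packed;
  }

  static void decode(long[] packed) {
    for (int i = 0; i < packed.length; i += 3) {
      long id = packed[i];
      long len = packed[i + 1];
      long genStamp = packed[i + 2];
      System.out.printf("block %d: len=%d gs=%d%n", id, len, genStamp);
    }
  }
}

Judging from the new imports above (ByteString, CodedInputStream, CodedOutputStream), the patch replaces this long[] view with a protobuf buffer-backed encoding, so large reports no longer need to be materialized as huge primitive arrays before serialization.
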
hadoop git commit: HDFS-7435. PB encoding of block reports is very inefficient. Contributed by Daryn Sharp. (cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

2015-03-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 af80a98ac -> 464271a5e


HDFS-7435. PB encoding of block reports is very inefficient. Contributed by 
Daryn Sharp.
(cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/464271a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/464271a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/464271a5

Branch: refs/heads/branch-2
Commit: 464271a5ede6d05bc7a68ce3f86f84dc72ec1edd
Parents: af80a98
Author: Kihwal Lee 
Authored: Fri Mar 13 14:36:34 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 13 14:36:34 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  | 660 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  22 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |  14 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +-
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../hdfs/server/datanode/BPServiceActor.java|  13 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  22 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/protocol/DatanodeRegistration.java   |   9 +
 .../hdfs/server/protocol/NamespaceInfo.java |  52 ++
 .../server/protocol/StorageBlockReport.java |   8 +-
 .../src/main/proto/DatanodeProtocol.proto   |   2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../hdfs/protocol/TestBlockListAsLongs.java | 237 +++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../server/datanode/BlockReportTestBase.java|  27 +-
 .../server/datanode/SimulatedFSDataset.java |  11 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   9 +-
 .../datanode/TestDataNodeVolumeFailure.java |   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |  23 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   3 +-
 .../hdfs/server/namenode/TestFSImage.java   |   2 +
 .../TestOfflineEditsViewer.java |   9 +-
 26 files changed, 812 insertions(+), 355 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/464271a5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d10dd29..99a278f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -438,6 +438,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7491. Add incremental blockreport latency to DN metrics.
 (Ming Ma via cnauroth)
 
+HDFS-7435. PB encoding of block reports is very inefficient.
+(Daryn Sharp via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/464271a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 4389714..1c89ee4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -17,342 +17,458 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Random;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 
-/**
- * This class provides an interface for accessing list of blocks that
- * has been implemented as long[].
- * This class

hadoop git commit: HDFS-7435. PB encoding of block reports is very inefficient. Contributed by Daryn Sharp. (cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

2015-03-13 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 cdeb1079e -> 116a7f1a1


HDFS-7435. PB encoding of block reports is very inefficient. Contributed by 
Daryn Sharp.
(cherry picked from commit d324164a51a43d72c02567248bd9f0f12b244a40)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
(cherry picked from commit 464271a5ede6d05bc7a68ce3f86f84dc72ec1edd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/116a7f1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/116a7f1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/116a7f1a

Branch: refs/heads/branch-2.7
Commit: 116a7f1a16771458adf702bf15d7a4706805839c
Parents: cdeb107
Author: Kihwal Lee 
Authored: Fri Mar 13 14:42:02 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 13 14:42:02 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  | 660 +++
 .../DatanodeProtocolClientSideTranslatorPB.java |  22 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |  14 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +-
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../hdfs/server/datanode/BPServiceActor.java|  13 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  22 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   2 +-
 .../server/protocol/DatanodeRegistration.java   |   9 +
 .../hdfs/server/protocol/NamespaceInfo.java |  52 ++
 .../server/protocol/StorageBlockReport.java |   8 +-
 .../src/main/proto/DatanodeProtocol.proto   |   2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../hdfs/protocol/TestBlockListAsLongs.java | 237 +++
 .../blockmanagement/TestBlockManager.java   |   8 +-
 .../server/datanode/BlockReportTestBase.java|  27 +-
 .../server/datanode/SimulatedFSDataset.java |  11 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   9 +-
 .../datanode/TestDataNodeVolumeFailure.java |   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |  23 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   3 +-
 .../hdfs/server/namenode/TestFSImage.java   |   2 +
 .../TestOfflineEditsViewer.java |   9 +-
 26 files changed, 812 insertions(+), 355 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/116a7f1a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6a8e098..31f597f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -426,6 +426,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7491. Add incremental blockreport latency to DN metrics.
 (Ming Ma via cnauroth)
 
+HDFS-7435. PB encoding of block reports is very inefficient.
+(Daryn Sharp via kihwal)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/116a7f1a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 4389714..1c89ee4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -17,342 +17,458 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Random;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 
-/**
- * This class provides an interface for access

hadoop git commit: HDFS-7816. Unable to open webhdfs paths with "+". Contributed by Haohui Mai

2015-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 93d0f4acc -> e79be0ee1


HDFS-7816. Unable to open webhdfs paths with "+". Contributed by Haohui Mai


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e79be0ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e79be0ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e79be0ee

Branch: refs/heads/trunk
Commit: e79be0ee123d05104eb34eb854afcf9fa78baef2
Parents: 93d0f4a
Author: Kihwal Lee 
Authored: Thu Mar 19 08:01:01 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 19 08:01:01 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../datanode/web/webhdfs/ParameterParser.java   | 79 +++-
 .../web/webhdfs/TestParameterParser.java|  9 +--
 3 files changed, 84 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e79be0ee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b0331b5..d9e8b9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1211,6 +1211,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7587. Edit log corruption can happen if append fails with a quota
 violation. (jing9)
 
+HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e79be0ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index f34402f..0ebf3dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
 import io.netty.handler.codec.http.QueryStringDecoder;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -39,6 +40,7 @@ import org.apache.hadoop.security.token.Token;
 
 import java.io.IOException;
 import java.net.URI;
+import java.nio.charset.Charset;
 import java.util.List;
 import java.util.Map;
 
@@ -51,7 +53,8 @@ class ParameterParser {
   private final Map<String, List<String>> params;
 
   ParameterParser(QueryStringDecoder decoder, Configuration conf) {
-this.path = 
QueryStringDecoder.decodeComponent(decoder.path().substring(WEBHDFS_PREFIX_LENGTH));
+this.path = decodeComponent(decoder.path().substring
+(WEBHDFS_PREFIX_LENGTH), Charsets.UTF_8);
 this.params = decoder.parameters();
 this.conf = conf;
   }
@@ -127,4 +130,78 @@ class ParameterParser {
  List<String> p = params.get(key);
 return p == null ? null : p.get(0);
   }
+
+  /**
+   * The following function behaves exactly the same as netty's
+   * QueryStringDecoder#decodeComponent except that it
+   * does not decode the '+' character as space. WebHDFS takes this scheme
+   * to maintain the backward-compatibility for pre-2.7 releases.
+   */
+  private static String decodeComponent(final String s, final Charset charset) 
{
+if (s == null) {
+  return "";
+}
+final int size = s.length();
+boolean modified = false;
+for (int i = 0; i < size; i++) {
+  final char c = s.charAt(i);
+  if (c == '%' || c == '+') {
+modified = true;
+break;
+  }
+}
+if (!modified) {
+  return s;
+}
+final byte[] buf = new byte[size];
+int pos = 0;  // position in `buf'.
+for (int i = 0; i < size; i++) {
+  char c = s.charAt(i);
+  if (c == '%') {
+if (i == size - 1) {
+  throw new IllegalArgumentException("unterminated escape sequence at" 
+
+ " end of string: " + s);
+}
+c = s.charAt(++i);
+if (c == '%') {
+  buf[pos++] = '%';  // "%%" -> "%"
+  break;
+}
+if (i == 

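The custom decodeComponent above exists because form-style decoding treats '+' as an encoded space, which mangles HDFS paths that legitimately contain '+'. A small stand-alone demonstration of the contrast; java.net.URLDecoder is used only to show the form-style behaviour, and the "path-style" line is a deliberate simplification, not how the patch actually decodes:

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

public class PlusDecodingDemo {
  public static void main(String[] args) throws Exception {
    String rawPath = "/user/foo/a+b%2Dc.txt";

    // Form-style decoding: '+' becomes a space -> "/user/foo/a b-c.txt"
    String formDecoded =
        URLDecoder.decode(rawPath, StandardCharsets.UTF_8.name());

    // Path-style decoding keeps '+' and only resolves %XX escapes
    // -> "/user/foo/a+b-c.txt" (simplified for the demo)
    String pathDecoded = rawPath.replace("%2D", "-");

    System.out.println(formDecoded);
    System.out.println(pathDecoded);
  }
}
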
hadoop git commit: HDFS-7816. Unable to open webhdfs paths with "+". Contributed by Haohui Mai (cherry picked from commit e79be0ee123d05104eb34eb854afcf9fa78baef2)

2015-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a888dac34 -> ceb39c1cc


HDFS-7816. Unable to open webhdfs paths with "+". Contributed by Haohui Mai
(cherry picked from commit e79be0ee123d05104eb34eb854afcf9fa78baef2)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ceb39c1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ceb39c1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ceb39c1c

Branch: refs/heads/branch-2
Commit: ceb39c1cc6b52178172c879d96cd743e0dc7a650
Parents: a888dac
Author: Kihwal Lee 
Authored: Thu Mar 19 08:03:47 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 19 08:03:47 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../datanode/web/webhdfs/ParameterParser.java   | 79 +++-
 .../web/webhdfs/TestParameterParser.java|  9 +--
 3 files changed, 84 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ceb39c1c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 15b529a..9be5da8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -908,6 +908,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade
 starts (Zhe Zhang via Colin P. McCabe)
 
+HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ceb39c1c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index f34402f..0ebf3dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
 import io.netty.handler.codec.http.QueryStringDecoder;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -39,6 +40,7 @@ import org.apache.hadoop.security.token.Token;
 
 import java.io.IOException;
 import java.net.URI;
+import java.nio.charset.Charset;
 import java.util.List;
 import java.util.Map;
 
@@ -51,7 +53,8 @@ class ParameterParser {
   private final Map<String, List<String>> params;
 
   ParameterParser(QueryStringDecoder decoder, Configuration conf) {
-this.path = 
QueryStringDecoder.decodeComponent(decoder.path().substring(WEBHDFS_PREFIX_LENGTH));
+this.path = decodeComponent(decoder.path().substring
+(WEBHDFS_PREFIX_LENGTH), Charsets.UTF_8);
 this.params = decoder.parameters();
 this.conf = conf;
   }
@@ -127,4 +130,78 @@ class ParameterParser {
  List<String> p = params.get(key);
 return p == null ? null : p.get(0);
   }
+
+  /**
+   * The following function behaves exactly the same as netty's
+   * QueryStringDecoder#decodeComponent except that it
+   * does not decode the '+' character as space. WebHDFS takes this scheme
+   * to maintain the backward-compatibility for pre-2.7 releases.
+   */
+  private static String decodeComponent(final String s, final Charset charset) 
{
+if (s == null) {
+  return "";
+}
+final int size = s.length();
+boolean modified = false;
+for (int i = 0; i < size; i++) {
+  final char c = s.charAt(i);
+  if (c == '%' || c == '+') {
+modified = true;
+break;
+  }
+}
+if (!modified) {
+  return s;
+}
+final byte[] buf = new byte[size];
+int pos = 0;  // position in `buf'.
+for (int i = 0; i < size; i++) {
+  char c = s.charAt(i);
+  if (c == '%') {
+if (i == size - 1) {
+  throw new IllegalArgumentException("unterminated escape sequence at" 
+
+ " end of string: " + s);
+}
+c = s.charAt(++i);
+

hadoop git commit: HDFS-7816. Unable to open webhdfs paths with "+". Contributed by Haohui Mai (cherry picked from commit e79be0ee123d05104eb34eb854afcf9fa78baef2)

2015-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7129287ef -> b8f269af9


HDFS-7816. Unable to open webhdfs paths with "+". Contributed by Haohui Mai
(cherry picked from commit e79be0ee123d05104eb34eb854afcf9fa78baef2)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
(cherry picked from commit ceb39c1cc6b52178172c879d96cd743e0dc7a650)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8f269af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8f269af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8f269af

Branch: refs/heads/branch-2.7
Commit: b8f269af9dae4709eb2eb39465285c1dad369943
Parents: 7129287
Author: Kihwal Lee 
Authored: Thu Mar 19 08:04:19 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 19 08:04:19 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../datanode/web/webhdfs/ParameterParser.java   | 79 +++-
 .../web/webhdfs/TestParameterParser.java|  9 +--
 3 files changed, 84 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8f269af/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2ef679a..3dd5f1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -888,6 +888,8 @@ Release 2.7.0 - UNRELEASED
 HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade
 starts (Zhe Zhang via Colin P. McCabe)
 
+HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8f269af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index f34402f..0ebf3dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
 import io.netty.handler.codec.http.QueryStringDecoder;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -39,6 +40,7 @@ import org.apache.hadoop.security.token.Token;
 
 import java.io.IOException;
 import java.net.URI;
+import java.nio.charset.Charset;
 import java.util.List;
 import java.util.Map;
 
@@ -51,7 +53,8 @@ class ParameterParser {
   private final Map<String, List<String>> params;
 
   ParameterParser(QueryStringDecoder decoder, Configuration conf) {
-this.path = 
QueryStringDecoder.decodeComponent(decoder.path().substring(WEBHDFS_PREFIX_LENGTH));
+this.path = decodeComponent(decoder.path().substring
+(WEBHDFS_PREFIX_LENGTH), Charsets.UTF_8);
 this.params = decoder.parameters();
 this.conf = conf;
   }
@@ -127,4 +130,78 @@ class ParameterParser {
  List<String> p = params.get(key);
 return p == null ? null : p.get(0);
   }
+
+  /**
+   * The following function behaves exactly the same as netty's
+   * QueryStringDecoder#decodeComponent except that it
+   * does not decode the '+' character as space. WebHDFS takes this scheme
+   * to maintain the backward-compatibility for pre-2.7 releases.
+   */
+  private static String decodeComponent(final String s, final Charset charset) 
{
+if (s == null) {
+  return "";
+}
+final int size = s.length();
+boolean modified = false;
+for (int i = 0; i < size; i++) {
+  final char c = s.charAt(i);
+  if (c == '%' || c == '+') {
+modified = true;
+break;
+  }
+}
+if (!modified) {
+  return s;
+}
+final byte[] buf = new byte[size];
+int pos = 0;  // position in `buf'.
+for (int i = 0; i < size; i++) {
+  char c = s.charAt(i);
+  if (c == '%') {
+if (i == size - 1) {
+  throw new IllegalArgumentException("unterminated escape sequence at" 
+
+ "

hadoop git commit: HDFS-7932. Speed up the shutdown of datanode during rolling upgrade. Contributed by Kihwal Lee. (cherry picked from commit 61a4c7fc9891def0e85edf7e41d74c6b92c85fdb)

2015-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ef9aa7c34 -> 2b9173059


HDFS-7932. Speed up the shutdown of datanode during rolling upgrade. 
Contributed by Kihwal Lee.
(cherry picked from commit 61a4c7fc9891def0e85edf7e41d74c6b92c85fdb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b917305
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b917305
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b917305

Branch: refs/heads/branch-2
Commit: 2b9173059d5941514107150b9d0a80d9debd85c4
Parents: ef9aa7c
Author: Kihwal Lee 
Authored: Thu Mar 19 12:28:36 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 19 12:28:36 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 7 ---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b917305/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2e5e8d1..ead8912 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -913,6 +913,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
 
+HDFS-7932. Speed up the shutdown of datanode during rolling 
upgrade.(kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b917305/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index c31d2b4..b32a0fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1731,8 +1731,9 @@ public class DataNode extends ReconfigurableBase
 // termination of receiver threads.
 if (!this.shutdownForUpgrade ||
 (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
-> 2500))) {
+> 1000))) {
   this.threadGroup.interrupt();
+  break;
 }
 LOG.info("Waiting for threadgroup to exit, active threads is " +
  this.threadGroup.activeCount());
@@ -1743,8 +1744,8 @@ public class DataNode extends ReconfigurableBase
   Thread.sleep(sleepMs);
 } catch (InterruptedException e) {}
 sleepMs = sleepMs * 3 / 2; // exponential backoff
-if (sleepMs > 1000) {
-  sleepMs = 1000;
+if (sleepMs > 200) {
+  sleepMs = 200;
 }
   }
   this.threadGroup = null;



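The two tuned constants above shorten both halves of the shutdown wait: during a rolling-upgrade shutdown the datanode now gives up waiting for receiver threads after roughly one second instead of 2.5, and the exponential back-off between checks is capped at 200 ms instead of a full second. A compact sketch of that wait loop, simplified to drop the shutdownForUpgrade flag and the logging that the real DataNode.shutdown() keeps:

static void awaitReceiverExit(ThreadGroup workers) throws InterruptedException {
  long timeNotified = System.currentTimeMillis();  // the real code uses Time.monotonicNow()
  long sleepMs = 2;
  while (workers.activeCount() > 0) {
    if (System.currentTimeMillis() - timeNotified > 1000) {
      workers.interrupt();     // stop waiting after ~1s and force termination
      break;                   // proceed with the rest of the shutdown
    }
    Thread.sleep(sleepMs);
    sleepMs = sleepMs * 3 / 2; // exponential back-off between checks
    if (sleepMs > 200) {
      sleepMs = 200;           // tighter cap so shutdown completes sooner
    }
  }
}
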
hadoop git commit: HDFS-7932. Speed up the shutdown of datanode during rolling upgrade. Contributed by Kihwal Lee.

2015-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1ccbc2970 -> 61a4c7fc9


HDFS-7932. Speed up the shutdown of datanode during rolling upgrade. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61a4c7fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61a4c7fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61a4c7fc

Branch: refs/heads/trunk
Commit: 61a4c7fc9891def0e85edf7e41d74c6b92c85fdb
Parents: 1ccbc29
Author: Kihwal Lee 
Authored: Thu Mar 19 12:27:03 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 19 12:27:03 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 7 ---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4c7fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac58c39..7392964 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1216,6 +1216,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
 
+HDFS-7932. Speed up the shutdown of datanode during rolling 
upgrade.(kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4c7fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 5be6a6d..e9befb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1724,8 +1724,9 @@ public class DataNode extends ReconfigurableBase
 // termination of receiver threads.
 if (!this.shutdownForUpgrade ||
 (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
-> 2500))) {
+> 1000))) {
   this.threadGroup.interrupt();
+  break;
 }
 LOG.info("Waiting for threadgroup to exit, active threads is " +
  this.threadGroup.activeCount());
@@ -1736,8 +1737,8 @@ public class DataNode extends ReconfigurableBase
   Thread.sleep(sleepMs);
 } catch (InterruptedException e) {}
 sleepMs = sleepMs * 3 / 2; // exponential backoff
-if (sleepMs > 1000) {
-  sleepMs = 1000;
+if (sleepMs > 200) {
+  sleepMs = 200;
 }
   }
   this.threadGroup = null;



hadoop git commit: HDFS-7932. Speed up the shutdown of datanode during rolling upgrade. Contributed by Kihwal Lee. (cherry picked from commit 61a4c7fc9891def0e85edf7e41d74c6b92c85fdb)

2015-03-19 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4821f0cdb -> 4e967d0bc


HDFS-7932. Speed up the shutdown of datanode during rolling upgrade. 
Contributed by Kihwal Lee.
(cherry picked from commit 61a4c7fc9891def0e85edf7e41d74c6b92c85fdb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e967d0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e967d0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e967d0b

Branch: refs/heads/branch-2.7
Commit: 4e967d0bcda203e971cbac355954631a29f3a525
Parents: 4821f0c
Author: Kihwal Lee 
Authored: Thu Mar 19 12:29:16 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 19 12:29:16 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 2 ++
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 7 ---
 2 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e967d0b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac83ec8..7e2348c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -893,6 +893,8 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
 
+HDFS-7932. Speed up the shutdown of datanode during rolling 
upgrade.(kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e967d0b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index c31d2b4..b32a0fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1731,8 +1731,9 @@ public class DataNode extends ReconfigurableBase
 // termination of receiver threads.
 if (!this.shutdownForUpgrade ||
 (this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
-> 2500))) {
+> 1000))) {
   this.threadGroup.interrupt();
+  break;
 }
 LOG.info("Waiting for threadgroup to exit, active threads is " +
  this.threadGroup.activeCount());
@@ -1743,8 +1744,8 @@ public class DataNode extends ReconfigurableBase
   Thread.sleep(sleepMs);
 } catch (InterruptedException e) {}
 sleepMs = sleepMs * 3 / 2; // exponential backoff
-if (sleepMs > 1000) {
-  sleepMs = 1000;
+if (sleepMs > 200) {
+  sleepMs = 200;
 }
   }
   this.threadGroup = null;



[2/2] hadoop git commit: HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). Contributed by Vinayakumar B

2015-03-20 Thread kihwal
HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). 
Contributed by Vinayakumar B


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75ead273
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75ead273
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75ead273

Branch: refs/heads/trunk
Commit: 75ead273bea8a7dad61c4f99c3a16cab2697c498
Parents: d368d36
Author: Kihwal Lee 
Authored: Fri Mar 20 13:31:16 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 20 14:02:09 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 +--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 40 ++--
 .../org/apache/hadoop/hdfs/LeaseRenewer.java| 14 +++
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 38 +++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 ++-
 .../hadoop/hdfs/server/balancer/Balancer.java   |  8 ++--
 .../BlockInfoContiguousUnderConstruction.java   |  3 +-
 .../server/blockmanagement/BlockManager.java| 13 ---
 .../BlockPlacementPolicyDefault.java|  8 ++--
 .../blockmanagement/DatanodeDescriptor.java |  5 ++-
 .../server/blockmanagement/DatanodeManager.java | 12 +++---
 .../blockmanagement/DecommissionManager.java|  4 +-
 .../blockmanagement/HeartbeatManager.java   |  2 +-
 .../PendingReplicationBlocks.java   |  8 ++--
 .../hdfs/server/datanode/BPServiceActor.java| 35 +
 .../hdfs/server/datanode/DataXceiver.java   |  6 +--
 .../hdfs/server/namenode/Checkpointer.java  | 10 ++---
 .../server/namenode/EditLogOutputStream.java|  6 +--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 14 +++
 .../hdfs/server/namenode/FSEditLogLoader.java   | 10 ++---
 .../hdfs/server/namenode/FSImageFormat.java | 16 
 .../hdfs/server/namenode/FSNamesystem.java  | 24 +++-
 .../hdfs/server/namenode/LeaseManager.java  |  8 ++--
 .../hdfs/server/namenode/NamenodeFsck.java  |  6 +--
 .../hdfs/server/namenode/ha/EditLogTailer.java  | 16 
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 27 +++--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  2 +-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   | 14 +++
 .../hdfs/TestInjectionForSimulatedStorage.java  |  4 +-
 .../java/org/apache/hadoop/hdfs/TestLease.java  |  4 +-
 .../apache/hadoop/hdfs/TestLeaseRenewer.java| 10 ++---
 .../hadoop/hdfs/TestParallelReadUtil.java   |  4 +-
 .../org/apache/hadoop/hdfs/TestReplication.java |  4 +-
 .../hdfs/server/balancer/TestBalancer.java  |  8 ++--
 .../blockmanagement/BlockManagerTestUtil.java   |  2 +-
 .../TestBlockInfoUnderConstruction.java | 31 +++
 .../blockmanagement/TestHeartbeatHandling.java  | 20 +-
 .../blockmanagement/TestHostFileManager.java|  3 +-
 .../server/blockmanagement/TestNodeCount.java   |  4 +-
 .../TestOverReplicatedBlocks.java   | 11 +++---
 .../blockmanagement/TestReplicationPolicy.java  | 34 +
 .../server/datanode/BlockReportTestBase.java|  8 ++--
 .../server/datanode/TestBlockReplacement.java   |  8 ++--
 .../namenode/TestNamenodeCapacityReport.java|  5 ++-
 .../namenode/metrics/TestNameNodeMetrics.java   | 15 +---
 48 files changed, 304 insertions(+), 237 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ead273/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ab14f2..e82c4c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1229,6 +1229,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7957. Truncate should verify quota before making changes. (jing9)
 
+HDFS-6841. Use Time.monotonicNow() wherever applicable instead of 
Time.now()
+(Vinayakumar B via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ead273/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3236771..70f66bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs

[1/2] hadoop git commit: HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). Contributed by Vinayakumar B

2015-03-20 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk d368d3647 -> 75ead273b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ead273/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 9b62467..8b2d11e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -262,7 +262,7 @@ public class TestBalancer {
   throws IOException, TimeoutException {
 long timeout = TIMEOUT;
 long failtime = (timeout <= 0L) ? Long.MAX_VALUE
- : Time.now() + timeout;
+ : Time.monotonicNow() + timeout;
 
 while (true) {
   long[] status = client.getStats();
@@ -274,7 +274,7 @@ public class TestBalancer {
   && usedSpaceVariance < CAPACITY_ALLOWED_VARIANCE)
 break; //done
 
-  if (Time.now() > failtime) {
+  if (Time.monotonicNow() > failtime) {
 throw new TimeoutException("Cluster failed to reached expected values 
of "
 + "totalSpace (current: " + status[0] 
 + ", expected: " + expectedTotalSpace 
@@ -369,7 +369,7 @@ public class TestBalancer {
   int expectedExcludedNodes) throws IOException, TimeoutException {
 long timeout = TIMEOUT;
 long failtime = (timeout <= 0L) ? Long.MAX_VALUE
-: Time.now() + timeout;
+: Time.monotonicNow() + timeout;
 if (!p.nodesToBeIncluded.isEmpty()) {
   totalCapacity = p.nodesToBeIncluded.size() * CAPACITY;
 }
@@ -399,7 +399,7 @@ public class TestBalancer {
 }
 if (Math.abs(avgUtilization - nodeUtilization) > 
BALANCE_ALLOWED_VARIANCE) {
   balanced = false;
-  if (Time.now() > failtime) {
+  if (Time.monotonicNow() > failtime) {
 throw new TimeoutException(
 "Rebalancing expected avg utilization to become "
 + avgUtilization + ", but on datanode " + datanode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ead273/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index f61176e..23e610f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -186,7 +186,7 @@ public class BlockManagerTestUtil {
   Assert.assertNotNull("Could not find DN with name: " + dnName, theDND);
   
   synchronized (hbm) {
-theDND.setLastUpdate(0);
+DFSTestUtil.setDatanodeDead(theDND);
 hbm.heartbeatCheck();
   }
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ead273/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index 453f411..a7ba293 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
 /**
@@ -46,40 +47,34 @@ public class TestBlockInfoUnderConstruction {
 new DatanodeStorageInfo[] {s1, s2, s3});
 
 // Recovery attempt #1.
-long currentTime = System.currentTimeMillis();
-dd1.setLastUpdate(currentTime - 3 * 1000);
-dd2.setLastUpdate(currentTime - 1 * 1000);
-dd3.setLastUpdate(currentTime - 2 * 1000);
+DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
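[Editorial note] The motivation behind these mechanical Time.now() to Time.monotonicNow() conversions is that wall-clock time can jump (NTP corrections, manual clock changes), which corrupts interval and deadline arithmetic; a monotonic clock only moves forward. A small self-contained sketch of the timeout idiom the converted test code uses; System.nanoTime() stands in here as the monotonic source, and the helper names are made up for illustration:

    import java.util.concurrent.TimeoutException;

    public class MonotonicTimeoutSketch {
      // Monotonic milliseconds; immune to wall-clock steps.
      static long monotonicNowMs() {
        return System.nanoTime() / 1_000_000;
      }

      // Poll until `done` is true or the deadline passes, computing the
      // deadline from the monotonic clock rather than Time.now().
      static void awaitCondition(java.util.function.BooleanSupplier done, long timeoutMs)
          throws TimeoutException, InterruptedException {
        long failTime = (timeoutMs <= 0L) ? Long.MAX_VALUE
                                          : monotonicNowMs() + timeoutMs;
        while (!done.getAsBoolean()) {
          if (monotonicNowMs() > failTime) {
            throw new TimeoutException("condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(100);
        }
      }
    }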
+ 

[1/2] hadoop git commit: HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). Contributed by Vinayakumar B (cherry picked from commit 99a8dcd19528b265d4fda9ae09a17e4af52f2782)

2015-03-20 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6be52e42a -> 29642b33c


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29642b33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index d71d6f5..b5ef72b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -262,7 +262,7 @@ public class TestBalancer {
   throws IOException, TimeoutException {
 long timeout = TIMEOUT;
 long failtime = (timeout <= 0L) ? Long.MAX_VALUE
- : Time.now() + timeout;
+ : Time.monotonicNow() + timeout;
 
 while (true) {
   long[] status = client.getStats();
@@ -274,7 +274,7 @@ public class TestBalancer {
   && usedSpaceVariance < CAPACITY_ALLOWED_VARIANCE)
 break; //done
 
-  if (Time.now() > failtime) {
+  if (Time.monotonicNow() > failtime) {
 throw new TimeoutException("Cluster failed to reached expected values 
of "
 + "totalSpace (current: " + status[0] 
 + ", expected: " + expectedTotalSpace 
@@ -369,7 +369,7 @@ public class TestBalancer {
   int expectedExcludedNodes) throws IOException, TimeoutException {
 long timeout = TIMEOUT;
 long failtime = (timeout <= 0L) ? Long.MAX_VALUE
-: Time.now() + timeout;
+: Time.monotonicNow() + timeout;
 if (!p.nodesToBeIncluded.isEmpty()) {
   totalCapacity = p.nodesToBeIncluded.size() * CAPACITY;
 }
@@ -399,7 +399,7 @@ public class TestBalancer {
 }
 if (Math.abs(avgUtilization - nodeUtilization) > 
BALANCE_ALLOWED_VARIANCE) {
   balanced = false;
-  if (Time.now() > failtime) {
+  if (Time.monotonicNow() > failtime) {
 throw new TimeoutException(
 "Rebalancing expected avg utilization to become "
 + avgUtilization + ", but on datanode " + datanode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29642b33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index f61176e..23e610f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -186,7 +186,7 @@ public class BlockManagerTestUtil {
   Assert.assertNotNull("Could not find DN with name: " + dnName, theDND);
   
   synchronized (hbm) {
-theDND.setLastUpdate(0);
+DFSTestUtil.setDatanodeDead(theDND);
 hbm.heartbeatCheck();
   }
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29642b33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index 453f411..a7ba293 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
 /**
@@ -46,40 +47,34 @@ public class TestBlockInfoUnderConstruction {
 new DatanodeStorageInfo[] {s1, s2, s3});
 
 // Recovery attempt #1.
-long currentTime = System.currentTimeMillis();
-dd1.setLastUpdate(currentTime - 3 * 1000);
-dd2.setLastUpdate(currentTime - 1 * 1000);
-dd3.setLastUpdate(currentTime - 2 * 1000);
+DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);

[2/2] hadoop git commit: HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). Contributed by Vinayakumar B (cherry picked from commit 99a8dcd19528b265d4fda9ae09a17e4af52f2782)

2015-03-20 Thread kihwal
HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). 
Contributed by Vinayakumar B
(cherry picked from commit 99a8dcd19528b265d4fda9ae09a17e4af52f2782)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29642b33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29642b33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29642b33

Branch: refs/heads/branch-2
Commit: 29642b33cbd31155c77ec4339201b2af1508b5e3
Parents: 6be52e4
Author: Kihwal Lee 
Authored: Fri Mar 20 13:52:09 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 20 14:05:02 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 +--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 40 ++--
 .../org/apache/hadoop/hdfs/LeaseRenewer.java| 14 +++
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 37 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 ++-
 .../hadoop/hdfs/server/balancer/Balancer.java   |  8 ++--
 .../BlockInfoContiguousUnderConstruction.java   |  3 +-
 .../server/blockmanagement/BlockManager.java| 13 ---
 .../BlockPlacementPolicyDefault.java|  8 ++--
 .../blockmanagement/DatanodeDescriptor.java |  5 ++-
 .../server/blockmanagement/DatanodeManager.java | 12 +++---
 .../blockmanagement/DecommissionManager.java|  4 +-
 .../blockmanagement/HeartbeatManager.java   |  2 +-
 .../PendingReplicationBlocks.java   |  8 ++--
 .../hdfs/server/datanode/BPServiceActor.java| 35 +
 .../hdfs/server/datanode/DataXceiver.java   |  6 +--
 .../hdfs/server/namenode/Checkpointer.java  | 10 ++---
 .../server/namenode/EditLogOutputStream.java|  6 +--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 14 +++
 .../hdfs/server/namenode/FSEditLogLoader.java   | 10 ++---
 .../hdfs/server/namenode/FSImageFormat.java | 16 
 .../hdfs/server/namenode/FSNamesystem.java  | 24 +++-
 .../hdfs/server/namenode/LeaseManager.java  |  8 ++--
 .../hdfs/server/namenode/NamenodeFsck.java  |  6 +--
 .../hdfs/server/namenode/ha/EditLogTailer.java  | 16 
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 26 +++--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  2 +-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   | 14 +++
 .../hdfs/TestInjectionForSimulatedStorage.java  |  4 +-
 .../java/org/apache/hadoop/hdfs/TestLease.java  |  4 +-
 .../apache/hadoop/hdfs/TestLeaseRenewer.java| 10 ++---
 .../hadoop/hdfs/TestParallelReadUtil.java   |  4 +-
 .../org/apache/hadoop/hdfs/TestReplication.java |  4 +-
 .../hdfs/server/balancer/TestBalancer.java  |  8 ++--
 .../blockmanagement/BlockManagerTestUtil.java   |  2 +-
 .../TestBlockInfoUnderConstruction.java | 31 +++
 .../blockmanagement/TestHeartbeatHandling.java  | 20 +-
 .../blockmanagement/TestHostFileManager.java|  3 +-
 .../server/blockmanagement/TestNodeCount.java   |  4 +-
 .../TestOverReplicatedBlocks.java   | 11 +++---
 .../blockmanagement/TestReplicationPolicy.java  | 34 +
 .../server/datanode/BlockReportTestBase.java|  8 ++--
 .../server/datanode/TestBlockReplacement.java   |  8 ++--
 .../namenode/TestNamenodeCapacityReport.java|  5 ++-
 .../namenode/metrics/TestNameNodeMetrics.java   | 15 +---
 48 files changed, 303 insertions(+), 236 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29642b33/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 580b533..61df4d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -926,6 +926,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7957. Truncate should verify quota before making changes. (jing9)
 
+HDFS-6841. Use Time.monotonicNow() wherever applicable instead of 
Time.now()
+(Vinayakumar B via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/29642b33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git

[1/2] hadoop git commit: HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). Contributed by Vinayakumar B (cherry picked from commit 99a8dcd19528b265d4fda9ae09a17e4af52f2782)

2015-03-20 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 03fb9b4ce -> 08f2f7ed3


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f2f7ed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index d71d6f5..b5ef72b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -262,7 +262,7 @@ public class TestBalancer {
   throws IOException, TimeoutException {
 long timeout = TIMEOUT;
 long failtime = (timeout <= 0L) ? Long.MAX_VALUE
- : Time.now() + timeout;
+ : Time.monotonicNow() + timeout;
 
 while (true) {
   long[] status = client.getStats();
@@ -274,7 +274,7 @@ public class TestBalancer {
   && usedSpaceVariance < CAPACITY_ALLOWED_VARIANCE)
 break; //done
 
-  if (Time.now() > failtime) {
+  if (Time.monotonicNow() > failtime) {
 throw new TimeoutException("Cluster failed to reached expected values 
of "
 + "totalSpace (current: " + status[0] 
 + ", expected: " + expectedTotalSpace 
@@ -369,7 +369,7 @@ public class TestBalancer {
   int expectedExcludedNodes) throws IOException, TimeoutException {
 long timeout = TIMEOUT;
 long failtime = (timeout <= 0L) ? Long.MAX_VALUE
-: Time.now() + timeout;
+: Time.monotonicNow() + timeout;
 if (!p.nodesToBeIncluded.isEmpty()) {
   totalCapacity = p.nodesToBeIncluded.size() * CAPACITY;
 }
@@ -399,7 +399,7 @@ public class TestBalancer {
 }
 if (Math.abs(avgUtilization - nodeUtilization) > 
BALANCE_ALLOWED_VARIANCE) {
   balanced = false;
-  if (Time.now() > failtime) {
+  if (Time.monotonicNow() > failtime) {
 throw new TimeoutException(
 "Rebalancing expected avg utilization to become "
 + avgUtilization + ", but on datanode " + datanode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f2f7ed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index f61176e..23e610f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -186,7 +186,7 @@ public class BlockManagerTestUtil {
   Assert.assertNotNull("Could not find DN with name: " + dnName, theDND);
   
   synchronized (hbm) {
-theDND.setLastUpdate(0);
+DFSTestUtil.setDatanodeDead(theDND);
 hbm.heartbeatCheck();
   }
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f2f7ed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index 453f411..a7ba293 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
 /**
@@ -46,40 +47,34 @@ public class TestBlockInfoUnderConstruction {
 new DatanodeStorageInfo[] {s1, s2, s3});
 
 // Recovery attempt #1.
-long currentTime = System.currentTimeMillis();
-dd1.setLastUpdate(currentTime - 3 * 1000);
-dd2.setLastUpdate(currentTime - 1 * 1000);
-dd3.setLastUpdate(currentTime - 2 * 1000);
+DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000

[2/2] hadoop git commit: HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). Contributed by Vinayakumar B (cherry picked from commit 99a8dcd19528b265d4fda9ae09a17e4af52f2782)

2015-03-20 Thread kihwal
HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now(). 
Contributed by Vinayakumar B
(cherry picked from commit 99a8dcd19528b265d4fda9ae09a17e4af52f2782)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
(cherry picked from commit 63b7870aec007081c82427587cb6e1d38e2c70f8)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08f2f7ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08f2f7ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08f2f7ed

Branch: refs/heads/branch-2.7
Commit: 08f2f7ed34b0136f14ad61ee0838a53864b0def0
Parents: 03fb9b4
Author: Kihwal Lee 
Authored: Fri Mar 20 13:59:01 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 20 14:06:02 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 +--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 40 ++--
 .../org/apache/hadoop/hdfs/LeaseRenewer.java| 14 +++
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 37 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 ++-
 .../hadoop/hdfs/server/balancer/Balancer.java   |  8 ++--
 .../BlockInfoContiguousUnderConstruction.java   |  3 +-
 .../server/blockmanagement/BlockManager.java| 13 ---
 .../BlockPlacementPolicyDefault.java|  8 ++--
 .../blockmanagement/DatanodeDescriptor.java |  5 ++-
 .../server/blockmanagement/DatanodeManager.java | 12 +++---
 .../blockmanagement/DecommissionManager.java|  4 +-
 .../blockmanagement/HeartbeatManager.java   |  2 +-
 .../PendingReplicationBlocks.java   |  8 ++--
 .../hdfs/server/datanode/BPServiceActor.java| 35 +
 .../hdfs/server/datanode/DataXceiver.java   |  6 +--
 .../hdfs/server/namenode/Checkpointer.java  | 10 ++---
 .../server/namenode/EditLogOutputStream.java|  6 +--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 14 +++
 .../hdfs/server/namenode/FSEditLogLoader.java   | 10 ++---
 .../hdfs/server/namenode/FSImageFormat.java | 16 
 .../hdfs/server/namenode/FSNamesystem.java  | 24 +++-
 .../hdfs/server/namenode/LeaseManager.java  |  8 ++--
 .../hdfs/server/namenode/NamenodeFsck.java  |  6 +--
 .../hdfs/server/namenode/ha/EditLogTailer.java  | 16 
 .../org/apache/hadoop/hdfs/web/JsonUtil.java|  2 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 26 +++--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  2 +-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   | 14 +++
 .../hdfs/TestInjectionForSimulatedStorage.java  |  4 +-
 .../java/org/apache/hadoop/hdfs/TestLease.java  |  4 +-
 .../apache/hadoop/hdfs/TestLeaseRenewer.java| 10 ++---
 .../hadoop/hdfs/TestParallelReadUtil.java   |  4 +-
 .../org/apache/hadoop/hdfs/TestReplication.java |  4 +-
 .../hdfs/server/balancer/TestBalancer.java  |  8 ++--
 .../blockmanagement/BlockManagerTestUtil.java   |  2 +-
 .../TestBlockInfoUnderConstruction.java | 31 +++
 .../blockmanagement/TestHeartbeatHandling.java  | 20 +-
 .../blockmanagement/TestHostFileManager.java|  3 +-
 .../server/blockmanagement/TestNodeCount.java   |  4 +-
 .../TestOverReplicatedBlocks.java   | 11 +++---
 .../blockmanagement/TestReplicationPolicy.java  | 34 +
 .../server/datanode/BlockReportTestBase.java|  8 ++--
 .../server/datanode/TestBlockReplacement.java   |  8 ++--
 .../namenode/TestNamenodeCapacityReport.java|  5 ++-
 .../namenode/metrics/TestNameNodeMetrics.java   | 15 +---
 48 files changed, 303 insertions(+), 236 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f2f7ed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bc6f999..95cfb2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -901,6 +901,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7957. Truncate should verify quota before making changes. (jing9)
 
+HDFS-6841. Use Time.monotonicNow() wherever applicable instead of 
Time.now()
+(Vinayakumar B via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop

hadoop git commit: HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes a lot of time if disks are busy. Contributed by Rushabh Shah.

2015-03-25 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 380788426 -> fc1031af7


HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes a lot 
of time if disks are busy. Contributed by Rushabh Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc1031af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc1031af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc1031af

Branch: refs/heads/trunk
Commit: fc1031af749435dc95efea6745b1b2300ce29446
Parents: 3807884
Author: Kihwal Lee 
Authored: Wed Mar 25 14:42:28 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Mar 25 14:42:59 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  |  37 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 268 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   3 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   8 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |   7 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   5 +-
 .../hdfs/server/datanode/DataNodeTestUtils.java |   7 +
 .../fsdataset/impl/TestWriteToReplica.java  | 152 +++
 .../namenode/TestListCorruptFileBlocks.java |   6 +
 .../namenode/TestProcessCorruptBlocks.java  |   3 +
 .../ha/TestPendingCorruptDnMessages.java|   3 +
 13 files changed, 430 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc1031af/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8f1d5fc..62c2f91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -339,6 +339,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
 
+HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
+a lot of time if disks are busy (Rushabh S Shah via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc1031af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 1c89ee4..834546b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -33,6 +35,7 @@ import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.WireFormat;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -108,6 +111,40 @@ public abstract class BlockListAsLongs implements 
Iterable {
 return builder.build();
   }
 
+  public static BlockListAsLongs readFrom(InputStream is) throws IOException {
+CodedInputStream cis = CodedInputStream.newInstance(is);
+int numBlocks = -1;
+ByteString blocksBuf = null;
+while (!cis.isAtEnd()) {
+  int tag = cis.readTag();
+  int field = WireFormat.getTagFieldNumber(tag);
+  switch(field) {
+case 0:
+  break;
+case 1:
+  numBlocks = (int)cis.readInt32();
+  break;
+case 2:
+  blocksBuf = cis.readBytes();
+  break;
+default:
+  cis.skipField(tag);
+  break;
+  }
+}
+if (numBlocks != -1 && blocksBuf != null) {
+  return decodeBuffer(numBlocks, blocksBuf);
+}
+return null;
+  }
+
+  public void writeTo(OutputStream os) throws IOException {
+CodedOutputStream cos = CodedOutputStream.newInstance(os);
+cos.writeInt32(1, getNumberOfBlocks());
+cos.writeBytes(2, getBlocksBuffer());
+cos.flush();
+  }
+  
   public static Builder builder() {
 return new BlockListAsLongs.Builder();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc1031af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Da
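[Editorial note] The readFrom/writeTo pair added above hand-rolls a tiny protobuf-compatible frame: field 1 carries the block count as an int32 and field 2 carries the packed block buffer as bytes, so a block report can be streamed to and from disk without building a full message object. A self-contained sketch of the same CodedOutputStream/CodedInputStream framing pattern; this is not the Hadoop BlockListAsLongs class itself, and the string payload is a stand-in:

    import com.google.protobuf.ByteString;
    import com.google.protobuf.CodedInputStream;
    import com.google.protobuf.CodedOutputStream;
    import com.google.protobuf.WireFormat;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public class CodedStreamFramingSketch {
      public static void main(String[] args) throws IOException {
        // Write: field 1 = count (int32), field 2 = opaque payload (bytes).
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        CodedOutputStream cos = CodedOutputStream.newInstance(bos);
        cos.writeInt32(1, 3);
        cos.writeBytes(2, ByteString.copyFromUtf8("stand-in block buffer"));
        cos.flush();

        // Read back by walking field tags, as BlockListAsLongs.readFrom does.
        CodedInputStream cis = CodedInputStream.newInstance(
            new ByteArrayInputStream(bos.toByteArray()));
        int count = -1;
        ByteString payload = null;
        while (!cis.isAtEnd()) {
          int tag = cis.readTag();
          switch (WireFormat.getTagFieldNumber(tag)) {
            case 1: count = cis.readInt32(); break;
            case 2: payload = cis.readBytes(); break;
            default: cis.skipField(tag); break; // tolerate unknown fields
          }
        }
        System.out.println("count=" + count + " payload=" + payload.toStringUtf8());
      }
    }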

hadoop git commit: HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes a lot of time if disks are busy. Contributed by Rushabh Shah.

2015-03-25 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c62840d53 -> be4eabdcd


HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes a lot 
of time if disks are busy. Contributed by Rushabh Shah.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be4eabdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be4eabdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be4eabdc

Branch: refs/heads/branch-2
Commit: be4eabdcd48b1a9c24e291d886f09afe8d4704bc
Parents: c62840d
Author: Kihwal Lee 
Authored: Wed Mar 25 14:42:28 2015 -0500
Committer: Kihwal Lee 
Committed: Wed Mar 25 14:51:00 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  |  37 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 268 ++-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   3 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   8 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |   7 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   5 +-
 .../hdfs/server/datanode/DataNodeTestUtils.java |   7 +
 .../fsdataset/impl/TestWriteToReplica.java  | 152 +++
 .../namenode/TestListCorruptFileBlocks.java |   6 +
 .../namenode/TestProcessCorruptBlocks.java  |   3 +
 .../ha/TestPendingCorruptDnMessages.java|   3 +
 13 files changed, 430 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be4eabdc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 01d678f..6791d88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -24,6 +24,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
 
+HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
+a lot of time if disks are busy (Rushabh S Shah via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be4eabdc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 1c89ee4..834546b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -33,6 +35,7 @@ import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.WireFormat;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -108,6 +111,40 @@ public abstract class BlockListAsLongs implements 
Iterable {
 return builder.build();
   }
 
+  public static BlockListAsLongs readFrom(InputStream is) throws IOException {
+CodedInputStream cis = CodedInputStream.newInstance(is);
+int numBlocks = -1;
+ByteString blocksBuf = null;
+while (!cis.isAtEnd()) {
+  int tag = cis.readTag();
+  int field = WireFormat.getTagFieldNumber(tag);
+  switch(field) {
+case 0:
+  break;
+case 1:
+  numBlocks = (int)cis.readInt32();
+  break;
+case 2:
+  blocksBuf = cis.readBytes();
+  break;
+default:
+  cis.skipField(tag);
+  break;
+  }
+}
+if (numBlocks != -1 && blocksBuf != null) {
+  return decodeBuffer(numBlocks, blocksBuf);
+}
+return null;
+  }
+
+  public void writeTo(OutputStream os) throws IOException {
+CodedOutputStream cos = CodedOutputStream.newInstance(os);
+cos.writeInt32(1, getNumberOfBlocks());
+cos.writeBytes(2, getBlocksBuffer());
+cos.flush();
+  }
+  
   public static Builder builder() {
 return new BlockListAsLongs.Builder();
   }

http://git-wip-us.apache.or

hadoop git commit: HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake Iwasaki.

2015-03-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk b4b4fe905 -> 222845632


HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake 
Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22284563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22284563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22284563

Branch: refs/heads/trunk
Commit: 222845632bc2919b03ee08d5bafff3233511e0d5
Parents: b4b4fe9
Author: Kihwal Lee 
Authored: Thu Mar 26 08:42:45 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 26 08:44:58 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/tracing/TestTracing.java  | 28 
 2 files changed, 26 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22284563/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 62c2f91..51842ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1277,6 +1277,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7977. NFS couldn't take percentile intervals (brandonli)
 
+HDFS-7963. Fix expected tracing spans in TestTracing along with HDFS-7054.
+(Masatake Iwasaki via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22284563/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 0bbd5b4..3720abe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -88,7 +88,10 @@ public class TestTracing {
   "ClientNamenodeProtocol#fsync",
   "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
   "ClientNamenodeProtocol#complete",
-  "DFSOutputStream",
+  "newStreamForCreate",
+  "DFSOutputStream#writeChunk",
+  "DFSOutputStream#close",
+  "dataStreamer",
   "OpWriteBlockProto",
   "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
   "ClientNamenodeProtocol#addBlock"
@@ -102,10 +105,25 @@ public class TestTracing {
 long spanStart = s.getStartTimeMillis();
 long spanEnd = s.getStopTimeMillis();
 
-// There should only be one trace id as it should all be homed in the
-// top trace.
-for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
-  Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+// Spans homed in the top trace shoud have same trace id.
+// Spans having multiple parents (e.g. "dataStreamer" added by HDFS-7054)
+// and children of them are exception.
+String[] spansInTopTrace = {
+  "testWriteTraceHooks",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+  "ClientNamenodeProtocol#create",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+  "ClientNamenodeProtocol#fsync",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+  "ClientNamenodeProtocol#complete",
+  "newStreamForCreate",
+  "DFSOutputStream#writeChunk",
+  "DFSOutputStream#close",
+};
+for (String desc : spansInTopTrace) {
+  for (Span span : map.get(desc)) {
+Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+  }
 }
   }
 



hadoop git commit: HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake Iwasaki.

2015-03-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c33ecd83e -> 071277b9c


HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake 
Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/071277b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/071277b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/071277b9

Branch: refs/heads/branch-2
Commit: 071277b9cece8da9d891f525c4dd659f2a4949dd
Parents: c33ecd8
Author: Kihwal Lee 
Authored: Thu Mar 26 08:42:45 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 26 08:42:45 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/tracing/TestTracing.java  | 28 
 2 files changed, 26 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/071277b9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6791d88..0c1640d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -973,6 +973,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7977. NFS couldn't take percentile intervals (brandonli)
 
+HDFS-7963. Fix expected tracing spans in TestTracing along with HDFS-7054.
+(Masatake Iwasaki via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/071277b9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 0bbd5b4..3720abe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -88,7 +88,10 @@ public class TestTracing {
   "ClientNamenodeProtocol#fsync",
   "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
   "ClientNamenodeProtocol#complete",
-  "DFSOutputStream",
+  "newStreamForCreate",
+  "DFSOutputStream#writeChunk",
+  "DFSOutputStream#close",
+  "dataStreamer",
   "OpWriteBlockProto",
   "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
   "ClientNamenodeProtocol#addBlock"
@@ -102,10 +105,25 @@ public class TestTracing {
 long spanStart = s.getStartTimeMillis();
 long spanEnd = s.getStopTimeMillis();
 
-// There should only be one trace id as it should all be homed in the
-// top trace.
-for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
-  Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+// Spans homed in the top trace shoud have same trace id.
+// Spans having multiple parents (e.g. "dataStreamer" added by HDFS-7054)
+// and children of them are exception.
+String[] spansInTopTrace = {
+  "testWriteTraceHooks",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+  "ClientNamenodeProtocol#create",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+  "ClientNamenodeProtocol#fsync",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+  "ClientNamenodeProtocol#complete",
+  "newStreamForCreate",
+  "DFSOutputStream#writeChunk",
+  "DFSOutputStream#close",
+};
+for (String desc : spansInTopTrace) {
+  for (Span span : map.get(desc)) {
+Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+  }
 }
   }
 



hadoop git commit: HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake Iwasaki.

2015-03-26 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 2e9c690e9 -> 370c91461


HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake 
Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/370c9146
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/370c9146
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/370c9146

Branch: refs/heads/branch-2.7
Commit: 370c91461f9d0756944dd36983ecdbd38c233af3
Parents: 2e9c690
Author: Kihwal Lee 
Authored: Thu Mar 26 08:42:45 2015 -0500
Committer: Kihwal Lee 
Committed: Thu Mar 26 08:45:39 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/tracing/TestTracing.java  | 28 
 2 files changed, 26 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/370c9146/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cb6b88d..50b4ad5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -932,6 +932,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7977. NFS couldn't take percentile intervals (brandonli)
 
+HDFS-7963. Fix expected tracing spans in TestTracing along with HDFS-7054.
+(Masatake Iwasaki via kihwal)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/370c9146/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 0bbd5b4..3720abe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -88,7 +88,10 @@ public class TestTracing {
   "ClientNamenodeProtocol#fsync",
   "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
   "ClientNamenodeProtocol#complete",
-  "DFSOutputStream",
+  "newStreamForCreate",
+  "DFSOutputStream#writeChunk",
+  "DFSOutputStream#close",
+  "dataStreamer",
   "OpWriteBlockProto",
   "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
   "ClientNamenodeProtocol#addBlock"
@@ -102,10 +105,25 @@ public class TestTracing {
 long spanStart = s.getStartTimeMillis();
 long spanEnd = s.getStopTimeMillis();
 
-// There should only be one trace id as it should all be homed in the
-// top trace.
-for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
-  Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+// Spans homed in the top trace shoud have same trace id.
+// Spans having multiple parents (e.g. "dataStreamer" added by HDFS-7054)
+// and children of them are exception.
+String[] spansInTopTrace = {
+  "testWriteTraceHooks",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+  "ClientNamenodeProtocol#create",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+  "ClientNamenodeProtocol#fsync",
+  "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+  "ClientNamenodeProtocol#complete",
+  "newStreamForCreate",
+  "DFSOutputStream#writeChunk",
+  "DFSOutputStream#close",
+};
+for (String desc : spansInTopTrace) {
+  for (Span span : map.get(desc)) {
+Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+  }
 }
   }
 



hadoop git commit: HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp.

2015-03-27 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk af618f23a -> 60882ab26


HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60882ab2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60882ab2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60882ab2

Branch: refs/heads/trunk
Commit: 60882ab26d49f05cbf0686944af6559f86b3417d
Parents: af618f2
Author: Kihwal Lee 
Authored: Fri Mar 27 09:05:17 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 27 09:05:17 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  2 ++
 .../hdfs/server/datanode/BPServiceActor.java   | 17 +++--
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |  2 --
 .../hdfs/server/datanode/SimulatedFSDataset.java   | 13 -
 .../datanode/TestIncrementalBlockReports.java  |  4 ++--
 5 files changed, 23 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60882ab2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dff8bd2..72ea4fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -342,6 +342,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
 a lot of time if disks are busy (Rushabh S Shah via kihwal)
 
+HDFS-7990. IBR delete ack should not be delayed. (daryn via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60882ab2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 10cce45..3b4756c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -82,12 +82,11 @@ class BPServiceActor implements Runnable {
 
   final BPOfferService bpos;
   
-  // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+  // lastBlockReport and lastHeartbeat may be assigned/read
   // by testing threads (through BPServiceActor#triggerXXX), while also 
   // assigned/read by the actor thread. Thus they should be declared as 
volatile
   // to make sure the "happens-before" consistency.
   volatile long lastBlockReport = 0;
-  volatile long lastDeletedReport = 0;
 
   boolean resetBlockReportTime = true;
 
@@ -417,10 +416,10 @@ class BPServiceActor implements Runnable {
   @VisibleForTesting
   void triggerDeletionReportForTests() {
 synchronized (pendingIncrementalBRperStorage) {
-  lastDeletedReport = 0;
+  sendImmediateIBR = true;
   pendingIncrementalBRperStorage.notifyAll();
 
-  while (lastDeletedReport == 0) {
+  while (sendImmediateIBR) {
 try {
   pendingIncrementalBRperStorage.wait(100);
 } catch (InterruptedException e) {
@@ -465,7 +464,6 @@ class BPServiceActor implements Runnable {
 // or we will report an RBW replica after the BlockReport already reports
 // a FINALIZED one.
 reportReceivedDeletedBlocks();
-lastDeletedReport = startTime;
 
 long brCreateStartTime = monotonicNow();
 Map perVolumeBlockLists =
@@ -674,7 +672,6 @@ class BPServiceActor implements Runnable {
*/
   private void offerService() throws Exception {
 LOG.info("For namenode " + nnAddr + " using"
-+ " DELETEREPORT_INTERVAL of " + dnConf.deleteReportInterval + " msec "
 + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
 + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
 + " Initial delay: " + dnConf.initialBlockReportDelay + "msec"
@@ -690,7 +687,9 @@ class BPServiceActor implements Runnable {
 //
 // Every so often, send heartbeat or block-report
 //
-if (startTime - lastHeartbeat >= dnConf.heartBeatInterval) {
+boolean sendHeartbeat =
+startTime - lastHeartbeat >= dnConf.heartBeatInterval;
+if (sendHeartbeat) {
   //
   // All heartbeat messag
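
The diff above replaces the timer-driven lastDeletedReport bookkeeping with a sendImmediateIBR flag: the test trigger raises the flag and waits on the shared pending-IBR lock, and the actor thread clears the flag (and notifies) once the incremental report has gone out. Below is a minimal standalone sketch of that trigger-and-wait handshake, with assumed class and method names (ImmediateReportSketch, sendReport) rather than the actual BPServiceActor code.

// Hypothetical, simplified model of the flag-plus-notify handshake;
// names are illustrative, not Hadoop APIs.
public class ImmediateReportSketch {
  private final Object pendingLock = new Object();  // stands in for pendingIncrementalBRperStorage
  private volatile boolean sendImmediateIBR = false;

  /** Test-side trigger: request an immediate report, then wait until it was sent. */
  public void triggerReportForTests() throws InterruptedException {
    synchronized (pendingLock) {
      sendImmediateIBR = true;
      pendingLock.notifyAll();
      while (sendImmediateIBR) {
        pendingLock.wait(100);        // re-check after the actor clears the flag
      }
    }
  }

  /** Actor-side loop: send a report whenever the flag is raised, then ack it. */
  public void offerServiceLoop() throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
      synchronized (pendingLock) {
        while (!sendImmediateIBR) {
          pendingLock.wait(1000);     // placeholder for the heartbeat interval
        }
      }
      sendReport();                   // placeholder for reportReceivedDeletedBlocks()
      synchronized (pendingLock) {
        sendImmediateIBR = false;     // report went out; release any waiting trigger
        pendingLock.notifyAll();
      }
    }
  }

  private void sendReport() { /* send the incremental block report */ }
}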

hadoop git commit: HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp. (cherry picked from commit 60882ab26d49f05cbf0686944af6559f86b3417d)

2015-03-27 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 36371cddd -> 577ea0886


HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp.
(cherry picked from commit 60882ab26d49f05cbf0686944af6559f86b3417d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/577ea088
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/577ea088
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/577ea088

Branch: refs/heads/branch-2
Commit: 577ea088655480a7550d968c02f8b5dd8b0d5298
Parents: 36371cd
Author: Kihwal Lee 
Authored: Fri Mar 27 09:06:23 2015 -0500
Committer: Kihwal Lee 
Committed: Fri Mar 27 09:06:23 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt|  2 ++
 .../hdfs/server/datanode/BPServiceActor.java   | 17 +++--
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |  2 --
 .../hdfs/server/datanode/SimulatedFSDataset.java   | 13 -
 .../datanode/TestIncrementalBlockReports.java  |  4 ++--
 5 files changed, 23 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/577ea088/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 743a5c2..09bbfaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -27,6 +27,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
 a lot of time if disks are busy (Rushabh S Shah via kihwal)
 
+HDFS-7990. IBR delete ack should not be delayed. (daryn via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/577ea088/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 10cce45..3b4756c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -82,12 +82,11 @@ class BPServiceActor implements Runnable {
 
   final BPOfferService bpos;
   
-  // lastBlockReport, lastDeletedReport and lastHeartbeat may be assigned/read
+  // lastBlockReport and lastHeartbeat may be assigned/read
   // by testing threads (through BPServiceActor#triggerXXX), while also 
   // assigned/read by the actor thread. Thus they should be declared as 
volatile
   // to make sure the "happens-before" consistency.
   volatile long lastBlockReport = 0;
-  volatile long lastDeletedReport = 0;
 
   boolean resetBlockReportTime = true;
 
@@ -417,10 +416,10 @@ class BPServiceActor implements Runnable {
   @VisibleForTesting
   void triggerDeletionReportForTests() {
 synchronized (pendingIncrementalBRperStorage) {
-  lastDeletedReport = 0;
+  sendImmediateIBR = true;
   pendingIncrementalBRperStorage.notifyAll();
 
-  while (lastDeletedReport == 0) {
+  while (sendImmediateIBR) {
 try {
   pendingIncrementalBRperStorage.wait(100);
 } catch (InterruptedException e) {
@@ -465,7 +464,6 @@ class BPServiceActor implements Runnable {
 // or we will report an RBW replica after the BlockReport already reports
 // a FINALIZED one.
 reportReceivedDeletedBlocks();
-lastDeletedReport = startTime;
 
 long brCreateStartTime = monotonicNow();
 Map perVolumeBlockLists =
@@ -674,7 +672,6 @@ class BPServiceActor implements Runnable {
*/
   private void offerService() throws Exception {
 LOG.info("For namenode " + nnAddr + " using"
-+ " DELETEREPORT_INTERVAL of " + dnConf.deleteReportInterval + " msec "
 + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
 + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
 + " Initial delay: " + dnConf.initialBlockReportDelay + "msec"
@@ -690,7 +687,9 @@ class BPServiceActor implements Runnable {
 //
 // Every so often, send heartbeat or block-report
 //
-if (startTime - lastHeartbeat >= dnConf.heartBeatInterval) {
+boolean sendHeartbeat =
+startTime - lastHeartbeat >= dnConf.heartBeatInterval;
+if (sendHeartbeat) {
   //
