HDFS-9869. Erasure Coding: Rename replication-based names in BlockManager to more generic [part-2]. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5865fe2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5865fe2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5865fe2b

Branch: refs/heads/HADOOP-12930
Commit: 5865fe2bf01284993572ea60b3ec3bf8b4492818
Parents: 8eadd71
Author: Zhe Zhang <z...@apache.org>
Authored: Mon Apr 25 22:01:54 2016 -0700
Committer: Zhe Zhang <z...@apache.org>
Committed: Mon Apr 25 22:01:54 2016 -0700

----------------------------------------------------------------------
 .../src/site/markdown/DeprecatedProperties.md   |   3 +-
 .../apache/hadoop/hdfs/HdfsConfiguration.java   |   4 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |   4 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../server/blockmanagement/BlockManager.java    | 112 ++---
 .../blockmanagement/DecommissionManager.java    |   2 +-
 .../blockmanagement/ExcessRedundancyMap.java    | 114 +++++
 .../blockmanagement/ExcessReplicaMap.java       | 106 -----
 .../PendingReconstructionBlocks.java            | 287 +++++++++++++
 .../PendingReplicationBlocks.java               | 291 -------------
 .../hadoop/hdfs/server/namenode/BackupNode.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../src/main/resources/hdfs-default.xml         |   4 +-
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java |   2 +-
 .../apache/hadoop/hdfs/TestDatanodeDeath.java   |   4 +-
 .../apache/hadoop/hdfs/TestDecommission.java    |   2 +-
 .../hdfs/TestDecommissionWithStriped.java       |   2 +-
 .../org/apache/hadoop/hdfs/TestFileAppend2.java |   2 +-
 .../org/apache/hadoop/hdfs/TestFileAppend4.java |   2 +-
 .../org/apache/hadoop/hdfs/TestReplication.java |   6 +-
 .../hadoop/hdfs/TestSetrepIncreasing.java       |   2 +-
 .../blockmanagement/TestBlockManager.java       |  19 +-
 .../TestBlocksWithNotEnoughRacks.java           |   2 +-
 .../TestOverReplicatedBlocks.java               |   2 +-
 .../TestPendingReconstruction.java              | 418 +++++++++++++++++++
 .../blockmanagement/TestPendingReplication.java | 418 -------------------
 .../namenode/TestDecommissioningStatus.java     |   4 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |   2 +-
 .../hdfs/server/namenode/TestHostsFiles.java    |   2 +-
 .../hdfs/server/namenode/TestMetaSave.java      |   2 +-
 .../namenode/TestProcessCorruptBlocks.java      |   8 +-
 .../namenode/TestReconstructStripedBlocks.java  |   6 +-
 .../hdfs/server/namenode/ha/TestDNFencing.java  |   2 +-
 33 files changed, 930 insertions(+), 918 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
index 62e1791..adebe6e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
@@ -45,7 +45,8 @@ The following table lists the configuration property names that are deprecated i
 | dfs.replication.considerLoad | dfs.namenode.replication.considerLoad |
 | dfs.replication.interval | dfs.namenode.replication.interval |
 | dfs.replication.min | dfs.namenode.replication.min |
-| dfs.replication.pending.timeout.sec | dfs.namenode.replication.pending.timeout-sec |
+| dfs.replication.pending.timeout.sec | dfs.namenode.reconstruction.pending.timeout-sec |
+| dfs.namenode.replication.pending.timeout-sec | dfs.namenode.reconstruction.pending.timeout-sec |
 | dfs.safemode.extension | dfs.namenode.safemode.extension |
 | dfs.safemode.threshold.pct | dfs.namenode.safemode.threshold-pct |
 | dfs.secondary.http.address | dfs.namenode.secondary.http-address |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index b11aa4e..580e7f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -120,7 +120,9 @@ public class HdfsConfiguration extends Configuration {
         new DeprecationDelta("dfs.replication.min",
             DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY),
         new DeprecationDelta("dfs.replication.pending.timeout.sec",
-            DeprecatedKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY),
+            DeprecatedKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY),
+        new DeprecationDelta("dfs.namenode.replication.pending.timeout-sec",
+            DeprecatedKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY),
         new DeprecationDelta("dfs.max-repl-streams",
             DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY),
         new DeprecationDelta("dfs.permissions",
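
As a reading aid (not part of the patch): once HdfsConfiguration has registered the DeprecationDelta entries above, a value set under either legacy replication key should be readable through the new reconstruction key. A minimal sketch, with an invented class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecatedTimeoutKeyExample {
      public static void main(String[] args) {
        // Constructing HdfsConfiguration registers the deprecation deltas above.
        Configuration conf = new HdfsConfiguration();
        // Set the timeout through the now-deprecated replication-based key.
        conf.set("dfs.namenode.replication.pending.timeout-sec", "300");
        // The same value should resolve through the new reconstruction key.
        System.out.println(
            conf.get("dfs.namenode.reconstruction.pending.timeout-sec"));
      }
    }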

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index e9b40d2..a6e2452 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -216,8 +216,8 @@ public interface HdfsClientConfigKeys {
     String DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
         "dfs.namenode.replication.interval";
     String DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
-    String DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY =
-        "dfs.namenode.replication.pending.timeout-sec";
+    String DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY =
+        "dfs.namenode.reconstruction.pending.timeout-sec";
     String DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
         "dfs.namenode.replication.max-streams";
     String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7802e4b..6303fb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -213,9 +213,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
   public static final String  DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY =
       "dfs.namenode.safemode.replication.min";
-  public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY =
-      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY;
-  public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+
+  public static final String  DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY =
+      "dfs.namenode.reconstruction.pending.timeout-sec";
+  public static final int     DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
+
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
   public static final int     DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8b50ef8..ec52122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -147,7 +147,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final PendingDataNodeMessages pendingDNMessages =
     new PendingDataNodeMessages();
 
-  private volatile long pendingReplicationBlocksCount = 0L;
+  private volatile long pendingReconstructionBlocksCount = 0L;
   private volatile long corruptReplicaBlocksCount = 0L;
   private volatile long lowRedundancyBlocksCount = 0L;
   private volatile long scheduledReplicationBlocksCount = 0L;
@@ -161,8 +161,8 @@ public class BlockManager implements BlockStatsMXBean {
   private ObjectName mxBeanName;
 
   /** Used by metrics */
-  public long getPendingReplicationBlocksCount() {
-    return pendingReplicationBlocksCount;
+  public long getPendingReconstructionBlocksCount() {
+    return pendingReconstructionBlocksCount;
   }
   /** Used by metrics */
   public long getUnderReplicatedBlocksCount() {
@@ -186,7 +186,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
   /** Used by metrics */
   public long getExcessBlocksCount() {
-    return excessReplicas.size();
+    return excessRedundancyMap.size();
   }
   /** Used by metrics */
   public long getPostponedMisreplicatedBlocksCount() {
@@ -246,7 +246,8 @@ public class BlockManager implements BlockStatsMXBean {
    * Maps a StorageID to the set of blocks that are "extra" for this
    * DataNode. We'll eventually remove these extras.
    */
-  private final ExcessReplicaMap excessReplicas = new ExcessReplicaMap();
+  private final ExcessRedundancyMap excessRedundancyMap =
+      new ExcessRedundancyMap();
 
   /**
    * Store set of Blocks that need to be replicated 1 or more times.
@@ -256,7 +257,7 @@ public class BlockManager implements BlockStatsMXBean {
       new LowRedundancyBlocks();
 
   @VisibleForTesting
-  final PendingReplicationBlocks pendingReplications;
+  final PendingReconstructionBlocks pendingReconstruction;
 
   /** The maximum number of replicas allowed for a block */
   public final short maxReplication;
@@ -352,9 +353,10 @@ public class BlockManager implements BlockStatsMXBean {
       datanodeManager.getNetworkTopology(),
       datanodeManager.getHost2DatanodeMap());
     storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
-    pendingReplications = new PendingReplicationBlocks(conf.getInt(
-      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
-      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
+    pendingReconstruction = new PendingReconstructionBlocks(conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
+        * 1000L);
 
     blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
@@ -542,7 +544,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   public void activate(Configuration conf, long blockTotal) {
-    pendingReplications.start();
+    pendingReconstruction.start();
     datanodeManager.activate(conf);
     this.replicationThread.setName("ReplicationMonitor");
     this.replicationThread.start();
@@ -565,7 +567,7 @@ public class BlockManager implements BlockStatsMXBean {
     } catch (InterruptedException ie) {
     }
     datanodeManager.close();
-    pendingReplications.stop();
+    pendingReconstruction.stop();
     blocksMap.close();
   }
 
@@ -604,8 +606,8 @@ public class BlockManager implements BlockStatsMXBean {
       dumpBlockMeta(block, out);
     }
 
-    // Dump blocks from pendingReplication
-    pendingReplications.metaSave(out);
+    // Dump blocks from pendingReconstruction
+    pendingReconstruction.metaSave(out);
 
     // Dump blocks that are waiting to be deleted
     invalidateBlocks.dump(out);
@@ -765,7 +767,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   /**
    * If IBR is not sent from expected locations yet, add the datanodes to
-   * pendingReplications in order to keep ReplicationMonitor from scheduling
+   * pendingReconstruction in order to keep ReplicationMonitor from scheduling
    * the block.
    */
   public void addExpectedReplicasToPending(BlockInfo blk) {
@@ -780,7 +782,7 @@ public class BlockManager implements BlockStatsMXBean {
             pendingNodes.add(dnd);
           }
         }
-        pendingReplications.increment(blk,
+        pendingReconstruction.increment(blk,
             pendingNodes.toArray(new DatanodeDescriptor[pendingNodes.size()]));
       }
     }
@@ -866,7 +868,7 @@ public class BlockManager implements BlockStatsMXBean {
     neededReconstruction.remove(lastBlock, replicas.liveReplicas(),
         replicas.readOnlyReplicas(),
        replicas.decommissionedAndDecommissioning(), getReplication(lastBlock));
-    pendingReplications.remove(lastBlock);
+    pendingReconstruction.remove(lastBlock);
 
     // remove this block from the list of pending blocks to be deleted. 
     for (DatanodeStorageInfo storage : targets) {
@@ -1435,7 +1437,7 @@ public class BlockManager implements BlockStatsMXBean {
   
   
   void updateState() {
-    pendingReplicationBlocksCount = pendingReplications.size();
+    pendingReconstructionBlocksCount = pendingReconstruction.size();
     lowRedundancyBlocksCount = neededReconstruction.size();
     corruptReplicaBlocksCount = corruptReplicas.size();
   }
@@ -1578,8 +1580,8 @@ public class BlockManager implements BlockStatsMXBean {
       }
 
       blockLog.debug(
-          "BLOCK* neededReconstruction = {} pendingReplications = {}",
-          neededReconstruction.size(), pendingReplications.size());
+          "BLOCK* neededReconstruction = {} pendingReconstruction = {}",
+          neededReconstruction.size(), pendingReconstruction.size());
     }
 
     return scheduledWork;
@@ -1622,7 +1624,7 @@ public class BlockManager implements BlockStatsMXBean {
     // not included in the numReplicas.liveReplicas() count
     assert liveReplicaNodes.size() >= numReplicas.liveReplicas();
 
-    int pendingNum = pendingReplications.getNumReplicas(block);
+    int pendingNum = pendingReconstruction.getNumReplicas(block);
     if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
         requiredReplication)) {
       neededReconstruction.remove(block, priority);
@@ -1690,7 +1692,7 @@ public class BlockManager implements BlockStatsMXBean {
     // do not schedule more if enough replicas is already pending
     final short requiredReplication = getExpectedReplicaNum(block);
     NumberReplicas numReplicas = countNodes(block);
-    final int pendingNum = pendingReplications.getNumReplicas(block);
+    final int pendingNum = pendingReconstruction.getNumReplicas(block);
     if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
         requiredReplication)) {
       neededReconstruction.remove(block, priority);
@@ -1718,8 +1720,8 @@ public class BlockManager implements BlockStatsMXBean {
 
     // Move the block-replication into a "pending" state.
     // The reason we use 'pending' is so we can retry
-    // replications that fail after an appropriate amount of time.
-    pendingReplications.increment(block,
+    // reconstructions that fail after an appropriate amount of time.
+    pendingReconstruction.increment(block,
         DatanodeStorageInfo.toDatanodeDescriptors(targets));
     blockLog.debug("BLOCK* block {} is moved from neededReplications to "
         + "pendingReplications", block);
@@ -1907,11 +1909,11 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * If there were any replication requests that timed out, reap them
-   * and put them back into the neededReplication queue
+   * If there were any reconstruction requests that timed out, reap them
+   * and put them back into the neededReconstruction queue
    */
-  private void processPendingReplications() {
-    BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks();
+  private void processPendingReconstructions() {
+    BlockInfo[] timedOutItems = pendingReconstruction.getTimedOutBlocks();
     if (timedOutItems != null) {
       namesystem.writeLock();
       try {
@@ -2890,7 +2892,7 @@ public class BlockManager implements BlockStatsMXBean {
     // Now check for completion of blocks and safe block count
     NumberReplicas num = countNodes(storedBlock);
     int numLiveReplicas = num.liveReplicas();
-    int pendingNum = pendingReplications.getNumReplicas(storedBlock);
+    int pendingNum = pendingReconstruction.getNumReplicas(storedBlock);
     int numCurrentReplica = numLiveReplicas + pendingNum;
 
     if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
@@ -3203,8 +3205,8 @@ public class BlockManager implements BlockStatsMXBean {
 
   /**
    * Find how many of the containing nodes are "extra", if any.
-   * If there are any extras, call chooseExcessReplicates() to
-   * mark them in the excessReplicateMap.
+   * If there are any extras, call chooseExcessRedundancies() to
+   * mark them in the excessRedundancyMap.
    */
   private void processExtraRedundancyBlock(final BlockInfo block,
       final short replication, final DatanodeDescriptor addedNode,
@@ -3237,11 +3239,11 @@ public class BlockManager implements BlockStatsMXBean {
         }
       }
     }
-    chooseExcessReplicates(nonExcess, block, replication, addedNode,
+    chooseExcessRedundancies(nonExcess, block, replication, addedNode,
         delNodeHint);
   }
 
-  private void chooseExcessReplicates(
+  private void chooseExcessRedundancies(
       final Collection<DatanodeStorageInfo> nonExcess,
       BlockInfo storedBlock, short replication,
       DatanodeDescriptor addedNode,
@@ -3250,19 +3252,19 @@ public class BlockManager implements BlockStatsMXBean {
     // first form a rack to datanodes map and
     BlockCollection bc = getBlockCollection(storedBlock);
     if (storedBlock.isStriped()) {
-      chooseExcessReplicasStriped(bc, nonExcess, storedBlock, delNodeHint);
+      chooseExcessRedundancyStriped(bc, nonExcess, storedBlock, delNodeHint);
     } else {
       final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(
           bc.getStoragePolicyID());
       final List<StorageType> excessTypes = storagePolicy.chooseExcess(
           replication, DatanodeStorageInfo.toStorageTypes(nonExcess));
-      chooseExcessReplicasContiguous(nonExcess, storedBlock, replication,
+      chooseExcessRedundancyContiguous(nonExcess, storedBlock, replication,
           addedNode, delNodeHint, excessTypes);
     }
   }
 
   /**
-   * We want "replication" replicates for the block, but we now have too many.
+   * We want sufficient redundancy for the block, but we now have too many.
   * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that:
    *
    * srcNodes.size() - dstNodes.size() == replication
@@ -3275,7 +3277,7 @@ public class BlockManager implements BlockStatsMXBean {
    * If no such a node is available,
    * then pick a node with least free space
    */
-  private void chooseExcessReplicasContiguous(
+  private void chooseExcessRedundancyContiguous(
       final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock,
       short replication, DatanodeDescriptor addedNode,
       DatanodeDescriptor delNodeHint, List<StorageType> excessTypes) {
@@ -3284,7 +3286,7 @@ public class BlockManager implements BlockStatsMXBean {
         .chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
             addedNode, delNodeHint);
     for (DatanodeStorageInfo choosenReplica : replicasToDelete) {
-      processChosenExcessReplica(nonExcess, choosenReplica, storedBlock);
+      processChosenExcessRedundancy(nonExcess, choosenReplica, storedBlock);
     }
   }
 
@@ -3297,7 +3299,7 @@ public class BlockManager implements BlockStatsMXBean {
   * The block placement policy will make sure that the left internal blocks are
    * spread across racks and also try hard to pick one with least free space.
    */
-  private void chooseExcessReplicasStriped(BlockCollection bc,
+  private void chooseExcessRedundancyStriped(BlockCollection bc,
       final Collection<DatanodeStorageInfo> nonExcess,
       BlockInfo storedBlock,
       DatanodeDescriptor delNodeHint) {
@@ -3325,7 +3327,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (delStorageHint != null) {
       Integer index = storage2index.get(delStorageHint);
       if (index != null && duplicated.get(index)) {
-        processChosenExcessReplica(nonExcess, delStorageHint, storedBlock);
+        processChosenExcessRedundancy(nonExcess, delStorageHint, storedBlock);
       }
     }
 
@@ -3357,7 +3359,7 @@ public class BlockManager implements BlockStatsMXBean {
             .chooseReplicasToDelete(nonExcess, candidates, (short) 1,
                 excessTypes, null, null);
         for (DatanodeStorageInfo chosen : replicasToDelete) {
-          processChosenExcessReplica(nonExcess, chosen, storedBlock);
+          processChosenExcessRedundancy(nonExcess, chosen, storedBlock);
           candidates.remove(chosen);
         }
       }
@@ -3365,11 +3367,11 @@ public class BlockManager implements BlockStatsMXBean {
     }
   }
 
-  private void processChosenExcessReplica(
+  private void processChosenExcessRedundancy(
       final Collection<DatanodeStorageInfo> nonExcess,
       final DatanodeStorageInfo chosen, BlockInfo storedBlock) {
     nonExcess.remove(chosen);
-    excessReplicas.add(chosen.getDatanodeDescriptor(), storedBlock);
+    excessRedundancyMap.add(chosen.getDatanodeDescriptor(), storedBlock);
     //
     // The 'excessblocks' tracks blocks until we get confirmation
     // that the datanode has deleted them; the only way we remove them
@@ -3381,7 +3383,7 @@ public class BlockManager implements BlockStatsMXBean {
     //
     final Block blockToInvalidate = getBlockOnStorage(storedBlock, chosen);
     addToInvalidates(blockToInvalidate, chosen.getDatanodeDescriptor());
-    blockLog.debug("BLOCK* chooseExcessReplicates: "
+    blockLog.debug("BLOCK* chooseExcessRedundancies: "
         + "({}, {}) is added to invalidated blocks set", chosen, storedBlock);
   }
 
@@ -3433,7 +3435,7 @@ public class BlockManager implements BlockStatsMXBean {
         updateNeededReconstructions(storedBlock, -1, 0);
       }
 
-      excessReplicas.remove(node, storedBlock);
+      excessRedundancyMap.remove(node, storedBlock);
       corruptReplicas.removeFromCorruptReplicasMap(storedBlock, node);
     }
   }
@@ -3504,7 +3506,7 @@ public class BlockManager implements BlockStatsMXBean {
     //
     BlockInfo storedBlock = getStoredBlock(block);
     if (storedBlock != null) {
-      pendingReplications.decrement(storedBlock, node);
+      pendingReconstruction.decrement(storedBlock, node);
     }
     processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
         delHintNode);
@@ -3749,11 +3751,11 @@ public class BlockManager implements BlockStatsMXBean {
 
   @VisibleForTesting
   int getExcessSize4Testing(String dnUuid) {
-    return excessReplicas.getSize4Testing(dnUuid);
+    return excessRedundancyMap.getSize4Testing(dnUuid);
   }
 
   public boolean isExcess(DatanodeDescriptor dn, BlockInfo blk) {
-    return excessReplicas.contains(dn, blk);
+    return excessRedundancyMap.contains(dn, blk);
   }
 
   /** 
@@ -3813,7 +3815,7 @@ public class BlockManager implements BlockStatsMXBean {
     }
 
     updateState();
-    if (pendingReplicationBlocksCount == 0 &&
+    if (pendingReconstructionBlocksCount == 0 &&
         lowRedundancyBlocksCount == 0) {
       LOG.info("Node {} is dead and there are no low redundancy" +
           " blocks or blocks pending reconstruction. Safe to decommission.",
@@ -3860,8 +3862,8 @@ public class BlockManager implements BlockStatsMXBean {
     block.setNumBytes(BlockCommand.NO_ACK);
     addToInvalidates(block);
     removeBlockFromMap(block);
-    // Remove the block from pendingReplications and neededReconstruction
-    pendingReplications.remove(block);
+    // Remove the block from pendingReconstruction and neededReconstruction
+    pendingReconstruction.remove(block);
     neededReconstruction.remove(block, LowRedundancyBlocks.LEVEL);
     if (postponedMisreplicatedBlocks.remove(block)) {
       postponedMisreplicatedBlocksCount.decrementAndGet();
@@ -3919,7 +3921,7 @@ public class BlockManager implements BlockStatsMXBean {
     for (BlockInfo block : bc.getBlocks()) {
       short expected = getExpectedReplicaNum(block);
       final NumberReplicas n = countNodes(block);
-      final int pending = pendingReplications.getNumReplicas(block);
+      final int pending = pendingReconstruction.getNumReplicas(block);
       if (!hasEnoughEffectiveReplicas(block, n, pending, expected)) {
         neededReconstruction.add(block, n.liveReplicas() + pending,
             n.readOnlyReplicas(),
@@ -4059,7 +4061,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public void removeBlockFromMap(BlockInfo block) {
     for(DatanodeStorageInfo info : blocksMap.getStorages(block)) {
-      excessReplicas.remove(info.getDatanodeDescriptor(), block);
+      excessRedundancyMap.remove(info.getDatanodeDescriptor(), block);
     }
 
     blocksMap.removeBlock(block);
@@ -4110,7 +4112,7 @@ public class BlockManager implements BlockStatsMXBean {
           // Process replication work only when active NN is out of safe mode.
           if (isPopulatingReplQueues()) {
             computeDatanodeWork();
-            processPendingReplications();
+            processPendingReconstructions();
             rescanPostponedMisreplicatedBlocks();
           }
           Thread.sleep(replicationRecheckInterval);
@@ -4258,8 +4260,8 @@ public class BlockManager implements BlockStatsMXBean {
    */
   public void clearQueues() {
     neededReconstruction.clear();
-    pendingReplications.clear();
-    excessReplicas.clear();
+    pendingReconstruction.clear();
+    excessRedundancyMap.clear();
     invalidateBlocks.clear();
     datanodeManager.clearPendingQueues();
     postponedMisreplicatedBlocks.clear();
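
As a reading aid (not part of the patch): the renamed pendingReconstruction map feeds the hasEnoughEffectiveReplicas() checks seen in several hunks above. A deliberately simplified sketch of that test; the real BlockManager method takes a NumberReplicas object and applies further checks, which are omitted here:

    final class EffectiveReplicasSketch {
      // A block needs no further reconstruction work once its live replicas plus
      // the replicas already pending reconstruction reach the expected redundancy.
      static boolean hasEnoughEffectiveReplicas(
          int liveReplicas, int pendingReconstruction, int requiredRedundancy) {
        return liveReplicas + pendingReconstruction >= requiredRedundancy;
      }
    }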

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 3b5f103..908c843 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -541,7 +541,7 @@ public class DecommissionManager {
         // pending
         if (blockManager.isNeededReconstruction(block, liveReplicas)) {
           if (!blockManager.neededReconstruction.contains(block) &&
-              blockManager.pendingReplications.getNumReplicas(block) == 0 &&
+              blockManager.pendingReconstruction.getNumReplicas(block) == 0 &&
               blockManager.isPopulatingReplQueues()) {
             // Process these blocks only when active NN is out of safe mode.
             blockManager.neededReconstruction.add(block,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java
new file mode 100644
index 0000000..ccdcf54
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessRedundancyMap.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.util.LightWeightHashSet;
+import org.slf4j.Logger;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Maps a datanode to the set of excess redundancy details.
+ *
+ * This class is thread safe.
+ */
+class ExcessRedundancyMap {
+  public static final Logger blockLog = NameNode.blockStateChangeLog;
+
+  private final Map<String, LightWeightHashSet<BlockInfo>> map =new HashMap<>();
+  private final AtomicLong size = new AtomicLong(0L);
+
+  /**
+   * @return the number of redundancies in this map.
+   */
+  long size() {
+    return size.get();
+  }
+
+  /**
+   * @return the number of redundancies corresponding to the given datanode.
+   */
+  @VisibleForTesting
+  synchronized int getSize4Testing(String dnUuid) {
+    final LightWeightHashSet<BlockInfo> set = map.get(dnUuid);
+    return set == null? 0: set.size();
+  }
+
+  synchronized void clear() {
+    map.clear();
+    size.set(0L);
+  }
+
+  /**
+   * @return does this map contain a redundancy corresponding to the given
+   *         datanode and the given block?
+   */
+  synchronized boolean contains(DatanodeDescriptor dn, BlockInfo blk) {
+    final LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
+    return set != null && set.contains(blk);
+  }
+
+  /**
+   * Add the redundancy of the given block stored in the given datanode to the
+   * map.
+   *
+   * @return true if the block is added.
+   */
+  synchronized boolean add(DatanodeDescriptor dn, BlockInfo blk) {
+    LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
+    if (set == null) {
+      set = new LightWeightHashSet<>();
+      map.put(dn.getDatanodeUuid(), set);
+    }
+    final boolean added = set.add(blk);
+    if (added) {
+      size.incrementAndGet();
+      blockLog.debug("BLOCK* ExcessRedundancyMap.add({}, {})", dn, blk);
+    }
+    return added;
+  }
+
+  /**
+   * Remove the redundancy corresponding to the given datanode and the given
+   * block.
+   *
+   * @return true if the block is removed.
+   */
+  synchronized boolean remove(DatanodeDescriptor dn, BlockInfo blk) {
+    final LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
+    if (set == null) {
+      return false;
+    }
+
+    final boolean removed = set.remove(blk);
+    if (removed) {
+      size.decrementAndGet();
+      blockLog.debug("BLOCK* ExcessRedundancyMap.remove({}, {})", dn, blk);
+
+      if (set.isEmpty()) {
+        map.remove(dn.getDatanodeUuid());
+      }
+    }
+    return removed;
+  }
+}
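
A hypothetical usage sketch (ours, not part of the patch) of the new map's life cycle, mirroring how BlockManager records an excess copy and drops it once the datanode confirms deletion. The wrapper class name is invented; it sits in the same package because the map is package-private:

    package org.apache.hadoop.hdfs.server.blockmanagement;

    final class ExcessRedundancyMapSketch {
      static void trackAndClear(DatanodeDescriptor dn, BlockInfo blk) {
        ExcessRedundancyMap excess = new ExcessRedundancyMap();
        // Record that 'dn' holds an excess copy of 'blk'.
        excess.add(dn, blk);
        assert excess.contains(dn, blk) && excess.size() == 1;
        // Once the datanode confirms the deletion, drop the entry again.
        excess.remove(dn, blk);
        assert excess.size() == 0;
      }
    }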

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessReplicaMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessReplicaMap.java
deleted file mode 100644
index 00aa902..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ExcessReplicaMap.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.slf4j.Logger;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Maps a datnode to the set of excess replicas.
- *
- * This class is thread safe.
- */
-class ExcessReplicaMap {
-  public static final Logger blockLog = NameNode.blockStateChangeLog;
-
-  private final Map<String, LightWeightHashSet<BlockInfo>> map =new HashMap<>();
-  private final AtomicLong size = new AtomicLong(0L);
-
-  /** @return the number of replicas in this map. */
-  long size() {
-    return size.get();
-  }
-
-  /** @return the number of replicas corresponding to the given datanode. */
-  @VisibleForTesting
-  synchronized int getSize4Testing(String dnUuid) {
-    final LightWeightHashSet<BlockInfo> set = map.get(dnUuid);
-    return set == null? 0: set.size();
-  }
-
-  synchronized void clear() {
-    map.clear();
-    size.set(0L);
-  }
-
-  /**
-   * @return does this map contains a replica corresponding to the given
-   *         datanode and the given block?
-   */
-  synchronized boolean contains(DatanodeDescriptor dn, BlockInfo blk) {
-    final LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
-    return set != null && set.contains(blk);
-  }
-
-  /**
-   * Add the replica of the given block stored in the given datanode to the map.
-   * @return true if the block is added.
-   */
-  synchronized boolean add(DatanodeDescriptor dn, BlockInfo blk) {
-    LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
-    if (set == null) {
-      set = new LightWeightHashSet<>();
-      map.put(dn.getDatanodeUuid(), set);
-    }
-    final boolean added = set.add(blk);
-    if (added) {
-      size.incrementAndGet();
-      blockLog.debug("BLOCK* ExcessReplicaMap.add({}, {})", dn, blk);
-    }
-    return added;
-  }
-
-  /**
-   * Remove the replica corresponding to the given datanode and the given block.
-   * @return true if the block is removed.
-   */
-  synchronized boolean remove(DatanodeDescriptor dn, BlockInfo blk) {
-    final LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
-    if (set == null) {
-      return false;
-    }
-
-    final boolean removed = set.remove(blk);
-    if (removed) {
-      size.decrementAndGet();
-      blockLog.debug("BLOCK* ExcessReplicaMap.remove({}, {})", dn, blk);
-
-      if (set.isEmpty()) {
-        map.remove(dn.getDatanodeUuid());
-      }
-    }
-    return removed;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
new file mode 100644
index 0000000..528199c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
@@ -0,0 +1,287 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import static org.apache.hadoop.util.Time.monotonicNow;
+
+import java.io.PrintWriter;
+import java.sql.Time;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.util.Daemon;
+import org.slf4j.Logger;
+
+/***************************************************
+ * PendingReconstructionBlocks does the bookkeeping of all
+ * blocks that gain stronger redundancy.
+ *
+ * It does the following:
+ * 1)  record blocks that gain stronger redundancy at this instant.
+ * 2)  a coarse grain timer to track age of reconstruction request
+ * 3)  a thread that periodically identifies reconstruction-requests
+ *     that never made it.
+ *
+ ***************************************************/
+class PendingReconstructionBlocks {
+  private static final Logger LOG = BlockManager.LOG;
+
+  private final Map<BlockInfo, PendingBlockInfo> pendingReconstructions;
+  private final ArrayList<BlockInfo> timedOutItems;
+  Daemon timerThread = null;
+  private volatile boolean fsRunning = true;
+
+  //
+  // It might take anywhere between 5 to 10 minutes before
+  // a request is timed out.
+  //
+  private long timeout = 5 * 60 * 1000;
+  private final static long DEFAULT_RECHECK_INTERVAL = 5 * 60 * 1000;
+
+  PendingReconstructionBlocks(long timeoutPeriod) {
+    if ( timeoutPeriod > 0 ) {
+      this.timeout = timeoutPeriod;
+    }
+    pendingReconstructions = new HashMap<>();
+    timedOutItems = new ArrayList<>();
+  }
+
+  void start() {
+    timerThread = new Daemon(new PendingReconstructionMonitor());
+    timerThread.start();
+  }
+
+  /**
+   * Add a block to the list of pending reconstructions
+   * @param block The corresponding block
+   * @param targets The DataNodes where replicas of the block should be placed
+   */
+  void increment(BlockInfo block, DatanodeDescriptor... targets) {
+    synchronized (pendingReconstructions) {
+      PendingBlockInfo found = pendingReconstructions.get(block);
+      if (found == null) {
+        pendingReconstructions.put(block, new PendingBlockInfo(targets));
+      } else {
+        found.incrementReplicas(targets);
+        found.setTimeStamp();
+      }
+    }
+  }
+
+  /**
+   * One reconstruction request for this block has finished.
+   * Decrement the number of pending reconstruction requests
+   * for this block.
+   *
+   * @param dn The DataNode that finishes the reconstruction
+   */
+  void decrement(BlockInfo block, DatanodeDescriptor dn) {
+    synchronized (pendingReconstructions) {
+      PendingBlockInfo found = pendingReconstructions.get(block);
+      if (found != null) {
+        LOG.debug("Removing pending reconstruction for {}", block);
+        found.decrementReplicas(dn);
+        if (found.getNumReplicas() <= 0) {
+          pendingReconstructions.remove(block);
+        }
+      }
+    }
+  }
+
+  /**
+   * Remove the record about the given block from pending reconstructions.
+   *
+   * @param block
+   *          The given block whose pending reconstruction requests need to be
+   *          removed
+   */
+  void remove(BlockInfo block) {
+    synchronized (pendingReconstructions) {
+      pendingReconstructions.remove(block);
+    }
+  }
+
+  public void clear() {
+    synchronized (pendingReconstructions) {
+      pendingReconstructions.clear();
+      timedOutItems.clear();
+    }
+  }
+
+  /**
+   * The total number of blocks that are undergoing reconstruction.
+   */
+  int size() {
+    return pendingReconstructions.size();
+  }
+
+  /**
+   * How many copies of this block are pending reconstruction?
+   */
+  int getNumReplicas(BlockInfo block) {
+    synchronized (pendingReconstructions) {
+      PendingBlockInfo found = pendingReconstructions.get(block);
+      if (found != null) {
+        return found.getNumReplicas();
+      }
+    }
+    return 0;
+  }
+
+  /**
+   * Returns a list of blocks that have timed out their
+   * reconstruction requests. Returns null if no blocks have
+   * timed out.
+   */
+  BlockInfo[] getTimedOutBlocks() {
+    synchronized (timedOutItems) {
+      if (timedOutItems.size() <= 0) {
+        return null;
+      }
+      BlockInfo[] blockList = timedOutItems.toArray(
+          new BlockInfo[timedOutItems.size()]);
+      timedOutItems.clear();
+      return blockList;
+    }
+  }
+
+  /**
+   * An object that contains information about a block that
+   * is being reconstructed. It records the timestamp when the
+   * system started reconstructing the most recent copy of this
+   * block. It also records the list of Datanodes where the
+   * reconstruction requests are in progress.
+   */
+  static class PendingBlockInfo {
+    private long timeStamp;
+    private final List<DatanodeDescriptor> targets;
+
+    PendingBlockInfo(DatanodeDescriptor[] targets) {
+      this.timeStamp = monotonicNow();
+      this.targets = targets == null ? new ArrayList<DatanodeDescriptor>()
+          : new ArrayList<>(Arrays.asList(targets));
+    }
+
+    long getTimeStamp() {
+      return timeStamp;
+    }
+
+    void setTimeStamp() {
+      timeStamp = monotonicNow();
+    }
+
+    void incrementReplicas(DatanodeDescriptor... newTargets) {
+      if (newTargets != null) {
+        for (DatanodeDescriptor newTarget : newTargets) {
+          if (!targets.contains(newTarget)) {
+            targets.add(newTarget);
+          }
+        }
+      }
+    }
+
+    void decrementReplicas(DatanodeDescriptor dn) {
+      targets.remove(dn);
+    }
+
+    int getNumReplicas() {
+      return targets.size();
+    }
+  }
+
+  /*
+   * A periodic thread that scans for blocks that never finished
+   * their reconstruction request.
+   */
+  class PendingReconstructionMonitor implements Runnable {
+    @Override
+    public void run() {
+      while (fsRunning) {
+        long period = Math.min(DEFAULT_RECHECK_INTERVAL, timeout);
+        try {
+          pendingReconstructionCheck();
+          Thread.sleep(period);
+        } catch (InterruptedException ie) {
+          LOG.debug("PendingReconstructionMonitor thread is interrupted.", ie);
+        }
+      }
+    }
+
+    /**
+     * Iterate through all items and detect timed-out items
+     */
+    void pendingReconstructionCheck() {
+      synchronized (pendingReconstructions) {
+        Iterator<Map.Entry<BlockInfo, PendingBlockInfo>> iter =
+            pendingReconstructions.entrySet().iterator();
+        long now = monotonicNow();
+        LOG.debug("PendingReconstructionMonitor checking Q");
+        while (iter.hasNext()) {
+          Map.Entry<BlockInfo, PendingBlockInfo> entry = iter.next();
+          PendingBlockInfo pendingBlock = entry.getValue();
+          if (now > pendingBlock.getTimeStamp() + timeout) {
+            BlockInfo block = entry.getKey();
+            synchronized (timedOutItems) {
+              timedOutItems.add(block);
+            }
+            LOG.warn("PendingReconstructionMonitor timed out " + block);
+            iter.remove();
+          }
+        }
+      }
+    }
+  }
+
+  /*
+   * Shuts down the pending reconstruction monitor thread.
+   * Waits for the thread to exit.
+   */
+  void stop() {
+    fsRunning = false;
+    if(timerThread == null) return;
+    timerThread.interrupt();
+    try {
+      timerThread.join(3000);
+    } catch (InterruptedException ie) {
+    }
+  }
+
+  /**
+   * Iterate through all items and print them.
+   */
+  void metaSave(PrintWriter out) {
+    synchronized (pendingReconstructions) {
+      out.println("Metasave: Blocks being reconstructed: " +
+                  pendingReconstructions.size());
+      for (Map.Entry<BlockInfo, PendingBlockInfo> entry :
+          pendingReconstructions.entrySet()) {
+        PendingBlockInfo pendingBlock = entry.getValue();
+        Block block = entry.getKey();
+        out.println(block +
+            " StartTime: " + new Time(pendingBlock.timeStamp) +
+            " NumReconstructInProgress: " +
+            pendingBlock.getNumReplicas());
+      }
+    }
+  }
+}
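
A hypothetical sketch (ours, not part of the patch) of how a caller such as BlockManager drives this class. The wrapper name and the literal five-minute timeout are illustrative; the real timeout comes from dfs.namenode.reconstruction.pending.timeout-sec:

    package org.apache.hadoop.hdfs.server.blockmanagement;

    final class PendingReconstructionSketch {
      static void scheduleAndComplete(BlockInfo block, DatanodeDescriptor target) {
        PendingReconstructionBlocks pending =
            new PendingReconstructionBlocks(5 * 60 * 1000L);
        pending.start();
        // Reconstruction of 'block' has been scheduled on 'target'.
        pending.increment(block, target);
        assert pending.getNumReplicas(block) == 1;
        // 'target' reported the new replica, so the pending entry is dropped.
        pending.decrement(block, target);
        assert pending.getNumReplicas(block) == 0;
        pending.stop();
      }
    }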

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
deleted file mode 100644
index 71939de..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import static org.apache.hadoop.util.Time.monotonicNow;
-
-import java.io.PrintWriter;
-import java.sql.Time;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.util.Daemon;
-import org.slf4j.Logger;
-
-/***************************************************
- * PendingReplicationBlocks does the bookkeeping of all
- * blocks that are getting replicated.
- *
- * It does the following:
- * 1)  record blocks that are getting replicated at this instant.
- * 2)  a coarse grain timer to track age of replication request
- * 3)  a thread that periodically identifies replication-requests
- *     that never made it.
- *
- ***************************************************/
-class PendingReplicationBlocks {
-  private static final Logger LOG = BlockManager.LOG;
-
-  private final Map<BlockInfo, PendingBlockInfo> pendingReplications;
-  private final ArrayList<BlockInfo> timedOutItems;
-  Daemon timerThread = null;
-  private volatile boolean fsRunning = true;
-
-  //
-  // It might take anywhere between 5 to 10 minutes before
-  // a request is timed out.
-  //
-  private long timeout = 5 * 60 * 1000;
-  private final static long DEFAULT_RECHECK_INTERVAL = 5 * 60 * 1000;
-
-  PendingReplicationBlocks(long timeoutPeriod) {
-    if ( timeoutPeriod > 0 ) {
-      this.timeout = timeoutPeriod;
-    }
-    pendingReplications = new HashMap<>();
-    timedOutItems = new ArrayList<>();
-  }
-
-  void start() {
-    timerThread = new Daemon(new PendingReplicationMonitor());
-    timerThread.start();
-  }
-
-  /**
-   * Add a block to the list of pending Replications
-   * @param block The corresponding block
-   * @param targets The DataNodes where replicas of the block should be placed
-   */
-  void increment(BlockInfo block, DatanodeDescriptor... targets) {
-    synchronized (pendingReplications) {
-      PendingBlockInfo found = pendingReplications.get(block);
-      if (found == null) {
-        pendingReplications.put(block, new PendingBlockInfo(targets));
-      } else {
-        found.incrementReplicas(targets);
-        found.setTimeStamp();
-      }
-    }
-  }
-
-  /**
-   * One replication request for this block has finished.
-   * Decrement the number of pending replication requests
-   * for this block.
-   * 
-   * @param dn The DataNode that finishes the replication
-   */
-  void decrement(BlockInfo block, DatanodeDescriptor dn) {
-    synchronized (pendingReplications) {
-      PendingBlockInfo found = pendingReplications.get(block);
-      if (found != null) {
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Removing pending replication for " + block);
-        }
-        found.decrementReplicas(dn);
-        if (found.getNumReplicas() <= 0) {
-          pendingReplications.remove(block);
-        }
-      }
-    }
-  }
-
-  /**
-   * Remove the record about the given block from pendingReplications.
-   * @param block The given block whose pending replication requests need to be
-   *              removed
-   */
-  void remove(BlockInfo block) {
-    synchronized (pendingReplications) {
-      pendingReplications.remove(block);
-    }
-  }
-
-  public void clear() {
-    synchronized (pendingReplications) {
-      pendingReplications.clear();
-      timedOutItems.clear();
-    }
-  }
-
-  /**
-   * The total number of blocks that are undergoing replication
-   */
-  int size() {
-    return pendingReplications.size();
-  } 
-
-  /**
-   * How many copies of this block is pending replication?
-   */
-  int getNumReplicas(BlockInfo block) {
-    synchronized (pendingReplications) {
-      PendingBlockInfo found = pendingReplications.get(block);
-      if (found != null) {
-        return found.getNumReplicas();
-      }
-    }
-    return 0;
-  }
-
-  /**
-   * Returns a list of blocks that have timed out their 
-   * replication requests. Returns null if no blocks have
-   * timed out.
-   */
-  BlockInfo[] getTimedOutBlocks() {
-    synchronized (timedOutItems) {
-      if (timedOutItems.size() <= 0) {
-        return null;
-      }
-      BlockInfo[] blockList = timedOutItems.toArray(
-          new BlockInfo[timedOutItems.size()]);
-      timedOutItems.clear();
-      return blockList;
-    }
-  }
-
-  /**
-   * An object that contains information about a block that 
-   * is being replicated. It records the timestamp when the 
-   * system started replicating the most recent copy of this
-   * block. It also records the list of Datanodes where the 
-   * replication requests are in progress.
-   */
-  static class PendingBlockInfo {
-    private long timeStamp;
-    private final List<DatanodeDescriptor> targets;
-
-    PendingBlockInfo(DatanodeDescriptor[] targets) {
-      this.timeStamp = monotonicNow();
-      this.targets = targets == null ? new ArrayList<DatanodeDescriptor>()
-          : new ArrayList<>(Arrays.asList(targets));
-    }
-
-    long getTimeStamp() {
-      return timeStamp;
-    }
-
-    void setTimeStamp() {
-      timeStamp = monotonicNow();
-    }
-
-    void incrementReplicas(DatanodeDescriptor... newTargets) {
-      if (newTargets != null) {
-        for (DatanodeDescriptor newTarget : newTargets) {
-          if (!targets.contains(newTarget)) {
-            targets.add(newTarget);
-          }
-        }
-      }
-    }
-
-    void decrementReplicas(DatanodeDescriptor dn) {
-      targets.remove(dn);
-    }
-
-    int getNumReplicas() {
-      return targets.size();
-    }
-  }
-
-  /*
-   * A periodic thread that scans for blocks that never finished
-   * their replication request.
-   */
-  class PendingReplicationMonitor implements Runnable {
-    @Override
-    public void run() {
-      while (fsRunning) {
-        long period = Math.min(DEFAULT_RECHECK_INTERVAL, timeout);
-        try {
-          pendingReplicationCheck();
-          Thread.sleep(period);
-        } catch (InterruptedException ie) {
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("PendingReplicationMonitor thread is interrupted.", ie);
-          }
-        }
-      }
-    }
-
-    /**
-     * Iterate through all items and detect timed-out items
-     */
-    void pendingReplicationCheck() {
-      synchronized (pendingReplications) {
-        Iterator<Map.Entry<BlockInfo, PendingBlockInfo>> iter =
-                                    pendingReplications.entrySet().iterator();
-        long now = monotonicNow();
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("PendingReplicationMonitor checking Q");
-        }
-        while (iter.hasNext()) {
-          Map.Entry<BlockInfo, PendingBlockInfo> entry = iter.next();
-          PendingBlockInfo pendingBlock = entry.getValue();
-          if (now > pendingBlock.getTimeStamp() + timeout) {
-            BlockInfo block = entry.getKey();
-            synchronized (timedOutItems) {
-              timedOutItems.add(block);
-            }
-            LOG.warn("PendingReplicationMonitor timed out " + block);
-            iter.remove();
-          }
-        }
-      }
-    }
-  }
-
-  /*
-   * Shuts down the pending replication monitor thread.
-   * Waits for the thread to exit.
-   */
-  void stop() {
-    fsRunning = false;
-    if(timerThread == null) return;
-    timerThread.interrupt();
-    try {
-      timerThread.join(3000);
-    } catch (InterruptedException ie) {
-    }
-  }
-
-  /**
-   * Iterate through all items and print them.
-   */
-  void metaSave(PrintWriter out) {
-    synchronized (pendingReplications) {
-      out.println("Metasave: Blocks being replicated: " +
-                  pendingReplications.size());
-      for (Map.Entry<BlockInfo, PendingBlockInfo> entry :
-          pendingReplications.entrySet()) {
-        PendingBlockInfo pendingBlock = entry.getValue();
-        Block block = entry.getKey();
-        out.println(block +
-            " StartTime: " + new Time(pendingBlock.timeStamp) +
-            " NumReplicaInProgress: " +
-            pendingBlock.getNumReplicas());
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index d36e0b9..899f0d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -472,8 +472,8 @@ public class BackupNode extends NameNode {
      * {@link BlockManager.ReplicationMonitor} protected by SafeMode.
      * {@link HeartbeatManager.Monitor} protected by SafeMode.
      * {@link DecommissionManager.Monitor} need to prohibit refreshNodes().
-     * {@link PendingReplicationBlocks.PendingReplicationMonitor} harmless,
-     * because ReplicationMonitor is muted.
+     * {@link PendingReconstructionBlocks.PendingReconstructionMonitor}
+     * harmless, because ReplicationMonitor is muted.
      */
     @Override
     public void startActiveServices() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1f7a2f0..722d283 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4469,7 +4469,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Override // FSNamesystemMBean
   @Metric
   public long getPendingReplicationBlocks() {
-    return blockManager.getPendingReplicationBlocksCount();
+    return blockManager.getPendingReconstructionBlocksCount();
   }
 
   @Override // FSNamesystemMBean
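
Note that the externally visible FSNamesystemMBean getter keeps its getPendingReplicationBlocks name; only the internal BlockManager accessor is renamed. Below is a hypothetical sketch of reading that counter over JMX from within the NameNode JVM; the MBean object name and attribute used here are assumptions, not taken from this patch.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class PendingReconstructionMetricProbe {
  public static void main(String[] args) throws Exception {
    // Assumed MBean name/attribute for the FSNamesystem state bean; adjust to
    // whatever the running NameNode actually registers.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName fsState =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    Object pending = mbs.getAttribute(fsState, "PendingReplicationBlocks");
    System.out.println("PendingReplicationBlocks = " + pending);
  }
}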

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c6e02a9..a6ed12e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3790,10 +3790,10 @@
 </property>
 
 <property>
-  <name>dfs.namenode.replication.pending.timeout-sec</name>
+  <name>dfs.namenode.reconstruction.pending.timeout-sec</name>
   <value>-1</value>
   <description>
-    Timeout in seconds for block replication.  If this value is 0 or less,
+    Timeout in seconds for block reconstruction.  If this value is 0 or less,
     then it will default to 5 minutes.
   </description>
 </property>
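
For anyone picking up the renamed key, a small illustrative snippet of setting it programmatically, mirroring how the tests later in this diff configure it (the value 4 is arbitrary here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ReconstructionTimeoutConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Renamed key from this patch; a value of 0 or less falls back to the
    // 5-minute default described in the property text above.
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
    System.out.println(conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, -1));
  }
}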

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index a1169c3..f41f809 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -85,7 +85,7 @@ public class TestAppendSnapshotTruncate {
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
     conf.setLong(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
     conf.setBoolean(ReplaceDatanodeOnFailure.BEST_EFFORT_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index cffeda3..d75e9ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -286,7 +286,7 @@ public class TestDatanodeDeath {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 2);
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(numDatanodes).build();
@@ -342,7 +342,7 @@ public class TestDatanodeDeath {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 2);
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     int myMaxNodes = 5;
     System.out.println("SimpleTest starting with DataNode to Kill " + 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index edc81ae..f6b5d8f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -114,7 +114,7 @@ public class TestDecommission {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);
   
     writeConfigFile(hostsFile, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index c0d8268..598e76f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -124,7 +124,7 @@ public class TestDecommissionWithStriped {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
         BLOCKREPORT_INTERVAL_MSEC);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
         4);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
         NAMENODE_REPLICATION_INTERVAL);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index cd1b851..28e51b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -481,7 +481,7 @@ public class TestFileAppend2 {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 2);
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
     conf.setInt(HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 265b510..4147851 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -84,7 +84,7 @@ public class TestFileAppend4 {
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     // handle under-replicated blocks quickly (for replication asserts)
     conf.setInt(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 5);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
     
     // handle failures in the DFSClient pipeline quickly

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index ca1092d..e0e0456 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -410,7 +410,7 @@ public class TestReplication {
      LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
      conf = new HdfsConfiguration();
      conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
-      conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+      conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
      conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f"); // only 3 copies exist
       
@@ -507,7 +507,7 @@ public class TestReplication {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setLong(
-          DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+          DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
           .storagesPerDatanode(1).build();
       FileSystem fs = cluster.getFileSystem();
@@ -687,7 +687,7 @@ public class TestReplication {
 
   private long pendingReplicationCount(BlockManager bm) {
     BlockManagerTestUtil.updateState(bm);
-    return bm.getPendingReplicationBlocksCount();
+    return bm.getPendingReconstructionBlocksCount();
   }
 
   private void assertNoReplicationWasPerformed(MiniDFSCluster cluster) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
index a055adc..fee30b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
@@ -39,7 +39,7 @@ public class TestSetrepIncreasing {
     }
     conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "" + fromREP);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 3a974e4..bdcc251 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -537,15 +537,16 @@ public class TestBlockManager {
     list_all.add(new ArrayList<BlockInfo>()); // for priority 0
     list_all.add(list_p1); // for priority 1
 
-    assertEquals("Block not initially pending replication", 0,
-        bm.pendingReplications.getNumReplicas(block));
+    assertEquals("Block not initially pending reconstruction", 0,
+        bm.pendingReconstruction.getNumReplicas(block));
     assertEquals(
-        "computeBlockReconstructionWork should indicate replication is needed",
+        "computeBlockReconstructionWork should indicate reconstruction is needed",
         1, bm.computeReconstructionWorkForBlocks(list_all));
-    assertTrue("replication is pending after work is computed",
-        bm.pendingReplications.getNumReplicas(block) > 0);
+    assertTrue("reconstruction is pending after work is computed",
+        bm.pendingReconstruction.getNumReplicas(block) > 0);
 
-    LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls = getAllPendingReplications();
+    LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls =
+        getAllPendingReconstruction();
     assertEquals(1, repls.size());
     Entry<DatanodeStorageInfo, BlockTargetPair> repl =
       repls.entries().iterator().next();
@@ -559,7 +560,7 @@ public class TestBlockManager {
     return pipeline;
   }
 
-  private LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> getAllPendingReplications() {
+  private LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> getAllPendingReconstruction() {
     LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls =
       LinkedListMultimap.create();
     for (DatanodeDescriptor dn : nodes) {
@@ -574,8 +575,8 @@ public class TestBlockManager {
   }
 
   /**
-   * Test that a source node for a highest-priority replication is chosen even if all available
-   * source nodes have reached their replication limits.
+   * Test that a source node for a highest-priority reconstruction is chosen
+   * even if all available source nodes have reached their replication limits.
    */
   @Test
   public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 1901dc1..197e3a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -68,7 +68,7 @@ public class TestBlocksWithNotEnoughRacks {
 
     // Have the NN check for pending replications every second so it
     // quickly schedules additional replicas as they are identified.
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
 
     // The DNs report blocks every second.
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5865fe2b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index 5b442cb..e33c7a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -54,7 +54,7 @@ public class TestOverReplicatedBlocks {
     conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
         Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
