HDFS-9775. Erasure Coding : Rename BlockRecoveryWork to BlockReconstructionWork. Contributed by Rakesh R.
Change-Id: I6dfc8efd94fa2bbb4eec0e4730a5a4f92c8a5519

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0fb2eff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0fb2eff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0fb2eff

Branch: refs/heads/yarn-2877
Commit: a0fb2eff9b71e2e2c0e53262773b34bed82585d4
Parents: 401ae4e
Author: Zhe Zhang <z...@apache.org>
Authored: Tue Feb 9 14:42:49 2016 -0800
Committer: Zhe Zhang <z...@apache.org>
Committed: Tue Feb 9 14:43:59 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../server/blockmanagement/BlockManager.java    |  55 ++++-----
 .../BlockReconstructionWork.java                | 111 +++++++++++++++++++
 .../blockmanagement/BlockRecoveryWork.java      | 111 -------------------
 .../blockmanagement/DatanodeDescriptor.java     |   2 +-
 .../blockmanagement/ErasureCodingWork.java      |   2 +-
 .../server/blockmanagement/ReplicationWork.java |   6 +-
 .../blockmanagement/BlockManagerTestUtil.java   |   2 +-
 .../blockmanagement/TestBlockManager.java       |   4 +-
 9 files changed, 152 insertions(+), 144 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 875799a..a447617 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -921,6 +921,9 @@ Trunk (Unreleased)
     HDFS-9658. Erasure Coding: allow to use multiple EC policies in striping
     related tests. (Rui Li via zhz)
 
+    HDFS-9775. Erasure Coding : Rename BlockRecoveryWork to
+    BlockReconstructionWork. (Rakesh R via zhz)
+
 Release 2.9.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 25cec8a..bee9372 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1450,7 +1450,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Scan blocks in {@link #neededReplications} and assign recovery
+   * Scan blocks in {@link #neededReplications} and assign reconstruction
    * (replication or erasure coding) work to data-nodes they belong to.
    *
    * The number of process blocks equals either twice the number of live
    * data-nodes or the number of under-replicated blocks whichever is less.
    *
    * @return number of blocks scheduled for replication during this iteration.
   */
-  int computeBlockRecoveryWork(int blocksToProcess) {
+  int computeBlockReconstructionWork(int blocksToProcess) {
     List<List<BlockInfo>> blocksToReplicate = null;
     namesystem.writeLock();
     try {
@@ -1468,30 +1468,33 @@
     } finally {
       namesystem.writeUnlock();
     }
-    return computeRecoveryWorkForBlocks(blocksToReplicate);
+    return computeReconstructionWorkForBlocks(blocksToReplicate);
   }
 
   /**
-   * Recover a set of blocks to full strength through replication or
+   * Reconstruct a set of blocks to full strength through replication or
    * erasure coding
    *
-   * @param blocksToRecover blocks to be recovered, for each priority
+   * @param blocksToReconstruct blocks to be reconstructed, for each priority
   * @return the number of blocks scheduled for replication
   */
  @VisibleForTesting
-  int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
+  int computeReconstructionWorkForBlocks(
+      List<List<BlockInfo>> blocksToReconstruct) {
     int scheduledWork = 0;
-    List<BlockRecoveryWork> recovWork = new LinkedList<>();
+    List<BlockReconstructionWork> reconWork = new LinkedList<>();
 
     // Step 1: categorize at-risk blocks into replication and EC tasks
     namesystem.writeLock();
     try {
       synchronized (neededReplications) {
-        for (int priority = 0; priority < blocksToRecover.size(); priority++) {
-          for (BlockInfo block : blocksToRecover.get(priority)) {
-            BlockRecoveryWork rw = scheduleRecovery(block, priority);
+        for (int priority = 0; priority < blocksToReconstruct
+            .size(); priority++) {
+          for (BlockInfo block : blocksToReconstruct.get(priority)) {
+            BlockReconstructionWork rw = scheduleReconstruction(block,
+                priority);
             if (rw != null) {
-              recovWork.add(rw);
+              reconWork.add(rw);
             }
           }
         }
@@ -1500,9 +1503,9 @@ public class BlockManager implements BlockStatsMXBean {
       namesystem.writeUnlock();
     }
 
-    // Step 2: choose target nodes for each recovery task
+    // Step 2: choose target nodes for each reconstruction task
     final Set<Node> excludedNodes = new HashSet<>();
-    for(BlockRecoveryWork rw : recovWork){
+    for(BlockReconstructionWork rw : reconWork){
       // Exclude all of the containing nodes from being targets.
       // This list includes decommissioning or corrupt nodes.
      excludedNodes.clear();
@@ -1521,7 +1524,7 @@ public class BlockManager implements BlockStatsMXBean {
     // Step 3: add tasks to the DN
     namesystem.writeLock();
     try {
-      for(BlockRecoveryWork rw : recovWork){
+      for(BlockReconstructionWork rw : reconWork){
         final DatanodeStorageInfo[] targets = rw.getTargets();
         if(targets == null || targets.length == 0){
           rw.resetTargets();
@@ -1529,7 +1532,7 @@ public class BlockManager implements BlockStatsMXBean {
         }
 
         synchronized (neededReplications) {
-          if (validateRecoveryWork(rw)) {
+          if (validateReconstructionWork(rw)) {
             scheduledWork++;
           }
         }
@@ -1540,7 +1543,7 @@ public class BlockManager implements BlockStatsMXBean {
 
     if (blockLog.isDebugEnabled()) {
       // log which blocks have been scheduled for replication
-      for(BlockRecoveryWork rw : recovWork){
+      for(BlockReconstructionWork rw : reconWork){
         DatanodeStorageInfo[] targets = rw.getTargets();
         if (targets != null && targets.length != 0) {
           StringBuilder targetList = new StringBuilder("datanode(s)");
@@ -1567,7 +1570,8 @@ public class BlockManager implements BlockStatsMXBean {
         (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockRecoveryWork scheduleRecovery(BlockInfo block, int priority) {
+  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+      int priority) {
     // block should belong to a file
     BlockCollection bc = getBlockCollection(block);
     // abandoned block or block reopened for append
@@ -1589,8 +1593,8 @@ public class BlockManager implements BlockStatsMXBean {
         containingNodes, liveReplicaNodes, numReplicas,
         liveBlockIndices, priority);
     if(srcNodes == null || srcNodes.length == 0) {
-      // block can not be recovered from any node
-      LOG.debug("Block " + block + " cannot be recovered " +
+      // block can not be reconstructed from any node
+      LOG.debug("Block " + block + " cannot be reconstructed " +
          "from any node");
      return null;
    }
@@ -1618,7 +1622,7 @@ public class BlockManager implements BlockStatsMXBean {
 
     if (block.isStriped()) {
       if (pendingNum > 0) {
-        // Wait the previous recovery to finish.
+        // Wait the previous reconstruction to finish.
         return null;
       }
       byte[] indices = new byte[liveBlockIndices.size()];
@@ -1635,7 +1639,7 @@ public class BlockManager implements BlockStatsMXBean {
     }
   }
 
-  private boolean validateRecoveryWork(BlockRecoveryWork rw) {
+  private boolean validateReconstructionWork(BlockReconstructionWork rw) {
     BlockInfo block = rw.getBlock();
     int priority = rw.getPriority();
     // Recheck since global lock was released
@@ -1672,11 +1676,12 @@ public class BlockManager implements BlockStatsMXBean {
       }
     }
 
-    // Add block to the to be recovered list
+    // Add block to the to be reconstructed list
     if (block.isStriped()) {
       assert rw instanceof ErasureCodingWork;
       assert rw.getTargets().length > 0;
-      assert pendingNum == 0: "Should wait the previous recovery to finish";
+      assert pendingNum == 0 : "Should wait the previous reconstruction"
+          + " to finish";
       String src = getBlockCollection(block).getName();
       ErasureCodingPolicy ecPolicy = null;
       try {
@@ -1687,7 +1692,7 @@ public class BlockManager implements BlockStatsMXBean {
       }
       if (ecPolicy == null) {
         blockLog.warn("No erasure coding policy found for the file {}. "
-            + "So cannot proceed for recovery", src);
+            + "So cannot proceed for reconstruction", src);
         // TODO: we may have to revisit later for what we can do better to
         // handle this case.
        return false;
@@ -4239,7 +4244,7 @@ public class BlockManager implements BlockStatsMXBean {
     final int nodesToProcess = (int) Math.ceil(numlive
         * this.blocksInvalidateWorkPct);
 
-    int workFound = this.computeBlockRecoveryWork(blocksToProcess);
+    int workFound = this.computeBlockReconstructionWork(blocksToProcess);
 
     // Update counters
     namesystem.writeLock();
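The renamed computeReconstructionWorkForBlocks() keeps the three-step locking shape visible in the hunks above: candidate blocks are gathered under the namesystem write lock (Step 1), the comparatively expensive target selection runs with no lock held (Step 2), and the lock is re-acquired to revalidate and commit the tasks (Step 3). A minimal, self-contained sketch of that pattern follows; all names in it are illustrative and are not the HDFS classes.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch of the Step 1/2/3 lock pattern; not BlockManager code.
class ThreeStepScheduler {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final List<String> neededBlocks = new ArrayList<>();

  int computeWork() {
    // Step 1: snapshot the at-risk blocks while holding the write lock.
    List<String> work;
    lock.writeLock().lock();
    try {
      work = new ArrayList<>(neededBlocks);
    } finally {
      lock.writeLock().unlock();
    }

    // Step 2: choose targets without the lock, so placement cost does not
    // stall other namesystem operations.
    List<String> placed = new ArrayList<>(work);

    // Step 3: re-acquire the lock, revalidate each task, and commit it.
    int scheduled = 0;
    lock.writeLock().lock();
    try {
      for (String block : placed) {
        if (neededBlocks.contains(block)) { // may have been resolved meanwhile
          scheduled++;
        }
      }
    } finally {
      lock.writeLock().unlock();
    }
    return scheduled;
  }
}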
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
new file mode 100644
index 0000000..df9c164
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.net.Node;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * This class is used internally by
+ * {@link BlockManager#computeReconstructionWorkForBlocks} to represent a
+ * task to reconstruct a block through replication or erasure coding.
+ * Reconstruction is done by transferring data from srcNodes to targets
+ */
+abstract class BlockReconstructionWork {
+  private final BlockInfo block;
+
+  private final BlockCollection bc;
+
+  /**
+   * An erasure coding reconstruction task has multiple source nodes.
+   * A replication task only has 1 source node, stored on top of the array
+   */
+  private final DatanodeDescriptor[] srcNodes;
+  /** Nodes containing the block; avoid them in choosing new targets */
+  private final List<DatanodeDescriptor> containingNodes;
+  /** Required by {@link BlockPlacementPolicy#chooseTarget} */
+  private final List<DatanodeStorageInfo> liveReplicaStorages;
+  private final int additionalReplRequired;
+
+  private DatanodeStorageInfo[] targets;
+  private final int priority;
+
+  public BlockReconstructionWork(BlockInfo block,
+      BlockCollection bc,
+      DatanodeDescriptor[] srcNodes,
+      List<DatanodeDescriptor> containingNodes,
+      List<DatanodeStorageInfo> liveReplicaStorages,
+      int additionalReplRequired,
+      int priority) {
+    this.block = block;
+    this.bc = bc;
+    this.srcNodes = srcNodes;
+    this.containingNodes = containingNodes;
+    this.liveReplicaStorages = liveReplicaStorages;
+    this.additionalReplRequired = additionalReplRequired;
+    this.priority = priority;
+    this.targets = null;
+  }
+
+  DatanodeStorageInfo[] getTargets() {
+    return targets;
+  }
+
+  void resetTargets() {
+    this.targets = null;
+  }
+
+  void setTargets(DatanodeStorageInfo[] targets) {
+    this.targets = targets;
+  }
+
+  List<DatanodeDescriptor> getContainingNodes() {
+    return Collections.unmodifiableList(containingNodes);
+  }
+
+  public int getPriority() {
+    return priority;
+  }
+
+  public BlockInfo getBlock() {
+    return block;
+  }
+
+  public DatanodeDescriptor[] getSrcNodes() {
+    return srcNodes;
+  }
+
+  BlockCollection getBc() {
+    return bc;
+  }
+
+  List<DatanodeStorageInfo> getLiveReplicaStorages() {
+    return liveReplicaStorages;
+  }
+
+  public int getAdditionalReplRequired() {
+    return additionalReplRequired;
+  }
+
+  abstract void chooseTargets(BlockPlacementPolicy blockplacement,
+      BlockStoragePolicySuite storagePolicySuite,
+      Set<Node> excludedNodes);
+}
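BlockReconstructionWork is a template: the bookkeeping (block, source nodes, priority, targets) lives in the base class, and a subclass only decides how targets are chosen, which is what separates plain replication from erasure-coded reconstruction. The following standalone sketch shows that shape with deliberately simplified, hypothetical types; PlacementPolicy and ReconstructionTask are stand-ins, not HDFS classes.

import java.util.List;
import java.util.Set;

// Hypothetical stand-in for BlockPlacementPolicy, greatly simplified.
interface PlacementPolicy {
  List<String> choose(int needed, Set<String> excludedNodes);
}

abstract class ReconstructionTask {
  private List<String> targets;

  List<String> getTargets() {
    return targets;
  }

  void setTargets(List<String> targets) {
    this.targets = targets;
  }

  // The only subclass responsibility: replication and EC reconstruction
  // pick their targets differently.
  abstract void chooseTargets(PlacementPolicy policy,
      Set<String> excludedNodes);
}

class SimpleReplicationTask extends ReconstructionTask {
  private final int additionalReplRequired;

  SimpleReplicationTask(int additionalReplRequired) {
    this.additionalReplRequired = additionalReplRequired;
  }

  @Override
  void chooseTargets(PlacementPolicy policy, Set<String> excludedNodes) {
    // Replication just asks the placement policy for N more replicas.
    setTargets(policy.choose(additionalReplRequired, excludedNodes));
  }
}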
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockRecoveryWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockRecoveryWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockRecoveryWork.java
deleted file mode 100644
index ed546df..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockRecoveryWork.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.blockmanagement;
-
-import org.apache.hadoop.net.Node;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
-/**
- * This class is used internally by
- * {@link BlockManager#computeRecoveryWorkForBlocks} to represent a task to
- * recover a block through replication or erasure coding. Recovery is done by
- * transferring data from srcNodes to targets
- */
-abstract class BlockRecoveryWork {
-  private final BlockInfo block;
-
-  private final BlockCollection bc;
-
-  /**
-   * An erasure coding recovery task has multiple source nodes.
-   * A replication task only has 1 source node, stored on top of the array
-   */
-  private final DatanodeDescriptor[] srcNodes;
-  /** Nodes containing the block; avoid them in choosing new targets */
-  private final List<DatanodeDescriptor> containingNodes;
-  /** Required by {@link BlockPlacementPolicy#chooseTarget} */
-  private final List<DatanodeStorageInfo> liveReplicaStorages;
-  private final int additionalReplRequired;
-
-  private DatanodeStorageInfo[] targets;
-  private final int priority;
-
-  public BlockRecoveryWork(BlockInfo block,
-      BlockCollection bc,
-      DatanodeDescriptor[] srcNodes,
-      List<DatanodeDescriptor> containingNodes,
-      List<DatanodeStorageInfo> liveReplicaStorages,
-      int additionalReplRequired,
-      int priority) {
-    this.block = block;
-    this.bc = bc;
-    this.srcNodes = srcNodes;
-    this.containingNodes = containingNodes;
-    this.liveReplicaStorages = liveReplicaStorages;
-    this.additionalReplRequired = additionalReplRequired;
-    this.priority = priority;
-    this.targets = null;
-  }
-
-  DatanodeStorageInfo[] getTargets() {
-    return targets;
-  }
-
-  void resetTargets() {
-    this.targets = null;
-  }
-
-  void setTargets(DatanodeStorageInfo[] targets) {
-    this.targets = targets;
-  }
-
-  List<DatanodeDescriptor> getContainingNodes() {
-    return Collections.unmodifiableList(containingNodes);
-  }
-
-  public int getPriority() {
-    return priority;
-  }
-
-  public BlockInfo getBlock() {
-    return block;
-  }
-
-  public DatanodeDescriptor[] getSrcNodes() {
-    return srcNodes;
-  }
-
-  BlockCollection getBc() {
-    return bc;
-  }
-
-  List<DatanodeStorageInfo> getLiveReplicaStorages() {
-    return liveReplicaStorages;
-  }
-
-  public int getAdditionalReplRequired() {
-    return additionalReplRequired;
-  }
-
-  abstract void chooseTargets(BlockPlacementPolicy blockplacement,
-      BlockStoragePolicySuite storagePolicySuite,
-      Set<Node> excludedNodes);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 9e7ab20..1646129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -608,7 +608,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     BlockECReconstructionInfo task = new BlockECReconstructionInfo(block,
         sources, targets, liveBlockIndices, ecPolicy);
     erasurecodeBlocks.offer(task);
-    BlockManager.LOG.debug("Adding block recovery task " + task + "to " +
BlockManager.LOG.debug("Adding block reconstruction task " + task + "to " + getName() + ", current queue size is " + erasurecodeBlocks.size()); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java index fec669c..85a25d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java @@ -22,7 +22,7 @@ import org.apache.hadoop.net.Node; import java.util.List; import java.util.Set; -class ErasureCodingWork extends BlockRecoveryWork { +class ErasureCodingWork extends BlockReconstructionWork { private final byte[] liveBlockIndicies; public ErasureCodingWork(BlockInfo block, http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java index 8a3900c..b44b9b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java @@ -19,11 +19,10 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.net.Node; -import java.util.Collections; import java.util.List; import java.util.Set; -class ReplicationWork extends BlockRecoveryWork { +class ReplicationWork extends BlockReconstructionWork { public ReplicationWork(BlockInfo block, BlockCollection bc, DatanodeDescriptor[] srcNodes, List<DatanodeDescriptor> containingNodes, List<DatanodeStorageInfo> liveReplicaStorages, int additionalReplRequired, @@ -33,7 +32,8 @@ class ReplicationWork extends BlockRecoveryWork { assert getSrcNodes().length == 1 : "There should be exactly 1 source node that have been selected"; getSrcNodes()[0].incrementPendingReplicationWithoutTargets(); - BlockManager.LOG.debug("Creating a ReplicationWork to recover " + block); + BlockManager.LOG + .debug("Creating a ReplicationWork to reconstruct " + block); } @Override http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java index 8a2b3dd..c0a4fdb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java +++ 
@@ -163,7 +163,7 @@ public class BlockManagerTestUtil {
    */
   public static int computeAllPendingWork(BlockManager bm) {
     int work = computeInvalidationWork(bm);
-    work += bm.computeBlockRecoveryWork(Integer.MAX_VALUE);
+    work += bm.computeBlockReconstructionWork(Integer.MAX_VALUE);
     return work;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0fb2eff/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index a970d77..5511b99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -540,8 +540,8 @@ public class TestBlockManager {
     assertEquals("Block not initially pending replication", 0,
         bm.pendingReplications.getNumReplicas(block));
     assertEquals(
-        "computeBlockRecoveryWork should indicate replication is needed", 1,
-        bm.computeRecoveryWorkForBlocks(list_all));
+        "computeBlockReconstructionWork should indicate replication is needed",
+        1, bm.computeReconstructionWorkForBlocks(list_all));
     assertTrue("replication is pending after work is computed",
         bm.pendingReplications.getNumReplicas(block) > 0);
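The TestBlockManager hunk above keeps the test's state-transition logic intact: a block starts out needed but not pending, one unit of reconstruction work is computed, and the block then shows up as pending. The standalone sketch below models that transition with hypothetical names (WorkQueue is not an HDFS class).

import java.util.ArrayList;
import java.util.List;

// Hypothetical model of the needed -> pending transition the test asserts.
class WorkQueue {
  private final List<String> needed = new ArrayList<>();
  private final List<String> pending = new ArrayList<>();

  void markNeeded(String block) {
    needed.add(block);
  }

  // Loosely analogous to computeReconstructionWorkForBlocks: each scheduled
  // block moves from the "needed" list to the "pending" list.
  int computeWork() {
    int scheduled = needed.size();
    pending.addAll(needed);
    needed.clear();
    return scheduled;
  }

  int pendingCount() {
    return pending.size();
  }

  public static void main(String[] args) {
    WorkQueue q = new WorkQueue();
    q.markNeeded("blk_1");
    System.out.println("initially pending: " + q.pendingCount()); // 0
    System.out.println("work computed:     " + q.computeWork());  // 1
    System.out.println("now pending:       " + q.pendingCount()); // 1
  }
}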