HDFS-9600. do not check replication if the block is under construction (Contributed by Phil Yang)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34cd7cd7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34cd7cd7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34cd7cd7

Branch: refs/heads/HDFS-1312
Commit: 34cd7cd76505d01ec251e30837c94ab03319a0c1
Parents: 2b25284
Author: Vinayakumar B <[email protected]>
Authored: Thu Jan 7 11:27:42 2016 +0530
Committer: Vinayakumar B <[email protected]>
Committed: Thu Jan 7 11:27:42 2016 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../server/blockmanagement/BlockManager.java    |  3 +-
 .../blockmanagement/DecommissionManager.java    |  5 +-
 .../blockmanagement/TestBlockManager.java       | 58 ++++++++++++++++++++
 4 files changed, 66 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34cd7cd7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d625b93..dfa9701 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3724,6 +3724,9 @@ Release 2.6.4 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-9600. do not check replication if the block is under construction
+    (Phil Yang via vinayakumarb)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34cd7cd7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0f27240..cee03f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3946,7 +3946,8 @@ public class BlockManager implements BlockStatsMXBean {
    */
   boolean isNeededReplication(BlockInfo storedBlock, int current) {
     int expected = getExpectedReplicaNum(storedBlock);
-    return current < expected || !isPlacementPolicySatisfied(storedBlock);
+    return storedBlock.isComplete()
+        && (current < expected || !isPlacementPolicySatisfied(storedBlock));
   }
 
   public short getExpectedReplicaNum(BlockInfo block) {
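
The hunk above is the core of the fix: isNeededReplication() now
short-circuits on BlockInfo.isComplete(), so a block whose write pipeline is
still open is never reported as under-replicated, regardless of how few live
replicas it has so far. A minimal standalone sketch of the guard, using
simplified stand-in types rather than the real Hadoop classes (the
placement-policy clause is elided here):

    // Sketch only: stand-ins for BlockInfo and BlockManager.
    enum BlockUCState { UNDER_CONSTRUCTION, COMMITTED, COMPLETE }

    class SketchBlock {
      BlockUCState state = BlockUCState.UNDER_CONSTRUCTION;
      int expectedReplicas = 3; // the file's replication factor
      boolean isComplete() { return state == BlockUCState.COMPLETE; }
    }

    class NeededReplicationSketch {
      // Mirrors the patched check: an incomplete block never "needs" it.
      static boolean isNeededReplication(SketchBlock b, int liveReplicas) {
        return b.isComplete() && liveReplicas < b.expectedReplicas;
      }

      public static void main(String[] args) {
        SketchBlock b = new SketchBlock();
        // One live replica while the writer's pipeline is open: scheduling
        // re-replication here would race with the ongoing write.
        System.out.println(isNeededReplication(b, 1)); // false
        b.state = BlockUCState.COMPLETE;
        System.out.println(isNeededReplication(b, 1)); // true: re-replicate
      }
    }
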
http://git-wip-us.apache.org/repos/asf/hadoop/blob/34cd7cd7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 8656f69..2a5d63c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -243,8 +243,9 @@ public class DecommissionManager {
       NumberReplicas numberReplicas) {
     final int numExpected = blockManager.getExpectedReplicaNum(block);
     final int numLive = numberReplicas.liveReplicas();
-    if (!blockManager.isNeededReplication(block, numLive)) {
-      // Block doesn't need replication. Skip.
+    if (numLive >= numExpected
+        && blockManager.isPlacementPolicySatisfied(block)) {
+      // Block has enough replica, skip
       LOG.trace("Block {} does not need replication.", block);
       return true;
     }
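
DecommissionManager deliberately stops delegating to isNeededReplication()
here. With the new isComplete() guard, an under-construction block would
always report "no replication needed", and a datanode could then finish
decommissioning while such a block still has too few live replicas; inlining
the replica-count and placement checks preserves the old, conservative
behaviour for incomplete blocks. A sketch of the divergence, again with
simplified stand-ins rather than the real classes:

    // Models an under-construction block with one live replica whose only
    // holder is being decommissioned.
    class DecommissionCheckSketch {
      static final boolean IS_COMPLETE = false; // still under construction
      static final int NUM_EXPECTED = 3;
      static final int NUM_LIVE = 1;
      static final boolean PLACEMENT_OK = true;

      // The patched BlockManager.isNeededReplication(): always false for
      // incomplete blocks.
      static boolean isNeededReplication() {
        return IS_COMPLETE && (NUM_LIVE < NUM_EXPECTED || !PLACEMENT_OK);
      }

      public static void main(String[] args) {
        // Old DecommissionManager logic skipped blocks that need no
        // replication. For an incomplete block this would now wrongly say
        // "sufficiently replicated" and let the node decommission:
        System.out.println(!isNeededReplication());             // true: skip
        // The inlined check from the patch still sees 1 < 3 live replicas,
        // so the block stays on the decommission pending list:
        System.out.println(NUM_LIVE >= NUM_EXPECTED && PLACEMENT_OK); // false
      }
    }
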
http://git-wip-us.apache.org/repos/asf/hadoop/blob/34cd7cd7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index d92ac90..4a74d2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -32,6 +32,7 @@ import static org.mockito.Mockito.verify;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map.Entry;
@@ -48,16 +49,22 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -70,8 +77,11 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.io.EnumSetWritable;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.net.NetworkTopology;
@@ -397,6 +407,54 @@ public class TestBlockManager {
     assertFalse(bm.isNeededReplication(block, bm.countLiveNodes(block)));
   }
 
+  @Test(timeout = 60000)
+  public void testNeededReplicationWhileAppending() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    String src = "/test-file";
+    Path file = new Path(src);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    try {
+      BlockManager bm = cluster.getNamesystem().getBlockManager();
+      FileSystem fs = cluster.getFileSystem();
+      NamenodeProtocols namenode = cluster.getNameNodeRpc();
+      DFSOutputStream out = null;
+      try {
+        out = (DFSOutputStream) (fs.create(file).
+            getWrappedStream());
+        out.write(1);
+        out.hflush();
+        out.close();
+        FSDataInputStream in = null;
+        ExtendedBlock oldBlock = null;
+        try {
+          in = fs.open(file);
+          oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
+        } finally {
+          IOUtils.closeStream(in);
+        }
+
+        String clientName =
+            ((DistributedFileSystem) fs).getClient().getClientName();
+        namenode.append(src, clientName, new EnumSetWritable<>(
+            EnumSet.of(CreateFlag.APPEND)));
+        LocatedBlock newLocatedBlock =
+            namenode.updateBlockForPipeline(oldBlock, clientName);
+        ExtendedBlock newBlock =
+            new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(),
+                oldBlock.getNumBytes(),
+                newLocatedBlock.getBlock().getGenerationStamp());
+        namenode.updatePipeline(clientName, oldBlock, newBlock,
+            newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
+        BlockInfo bi = bm.getStoredBlock(newBlock.getLocalBlock());
+        assertFalse(bm.isNeededReplication(bi, bm.countLiveNodes(bi)));
+      } finally {
+        IOUtils.closeStream(out);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 
   /**
    * Tell the block manager that replication is completed for the given
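
The new test pins down the under-construction case without racing a real
client: it writes and closes a one-byte file on the default single-datanode
mini-cluster, then drives the append RPCs against NamenodeProtocols directly.
append() reopens the file, updateBlockForPipeline() bumps the block's
generation stamp, and updatePipeline() installs the recovered pipeline,
leaving the last block under construction with a single live replica, fewer
than the file's expected replication. The assertion then confirms that
isNeededReplication() nevertheless returns false; with the pre-patch code the
same call returned true.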
