Author: jing9
Date: Wed Aug 20 18:14:43 2014
New Revision: 1619193

URL: http://svn.apache.org/r1619193
Log:
HDFS-6870. Merge r1619192 from trunk.
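For context: the leak fixed below is triggered by the overwrite form of rename, where an existing dst file is removed as part of the rename and its blocks must be handed off for deletion. A minimal client-side sketch of the affected call, mirroring the call the new test makes (illustrative only; the paths are made up and the cast assumes fs.defaultFS points at an HDFS cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Options.Rename;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RenameOverwriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS is an hdfs:// URI so this cast succeeds.
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("/").getFileSystem(conf);
        Path src = new Path("/foo/src");
        Path dst = new Path("/foo/dst"); // already exists
        // OVERWRITE removes the existing dst as part of the rename; the
        // removed file's blocks and INode must then be collected for
        // deletion, which is what the FSDirectory fix below guarantees.
        dfs.rename(src, dst, Rename.OVERWRITE);
      }
    }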
Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619193&r1=1619192&r2=1619193&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Aug 20 18:14:43 2014
@@ -268,6 +268,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6868. portmap and nfs3 are documented as hadoop commands instead of
     hdfs (brandonli)
 
+    HDFS-6870. Blocks and INodes could leak for Rename with overwrite flag. (Yi
+    Liu via jing9)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1619193&r1=1619192&r2=1619193&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Aug 20 18:14:43 2014
@@ -646,15 +646,20 @@ public class FSDirectory implements Clos
       tx.updateMtimeAndLease(timestamp);
 
       // Collect the blocks and remove the lease for previous dst
-      long filesDeleted = -1;
+      boolean filesDeleted = false;
       if (removedDst != null) {
         undoRemoveDst = false;
         if (removedNum > 0) {
           BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
           List<INode> removedINodes = new ChunkedArrayList<INode>();
-          filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
-              dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
-              true).get(Quota.NAMESPACE);
+          if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
+            removedDst.destroyAndCollectBlocks(collectedBlocks, removedINodes);
+            filesDeleted = true;
+          } else {
+            filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
+                dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
+                true).get(Quota.NAMESPACE) >= 0;
+          }
           getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
               removedINodes, false);
         }
@@ -667,7 +672,7 @@ public class FSDirectory implements Clos
       }
 
       tx.updateQuotasInSourceTree();
-      return filesDeleted >= 0;
+      return filesDeleted;
     }
   } finally {
     if (undoRemoveSrc) {
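Restated as a commented sketch, the corrected cleanup path above does the following (identifiers come from the hunk itself; the surrounding method body is elided). Per the CHANGES.txt entry, the old code's unconditional cleanSubtree() call could leave the overwritten dst's blocks and INodes uncollected when dst was not in any snapshot; the fix destroys such a dst outright:

    // Inside the rename handling, after the old dst has been removed:
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ChunkedArrayList<INode>();
    if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
      // dst is not captured by any snapshot: destroy the whole subtree
      // so every block and INode in it is queued for removal.
      removedDst.destroyAndCollectBlocks(collectedBlocks, removedINodes);
      filesDeleted = true;
    } else {
      // dst is still referenced by a snapshot: clean only the current
      // state and keep the snapshot-visible data intact.
      filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
          dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
          true).get(Quota.NAMESPACE) >= 0;
    }
    // Hand everything collected to the namesystem for actual removal.
    getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
        removedINodes, false);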
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1619193&r1=1619192&r2=1619193&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java Wed Aug 20 18:14:43 2014
@@ -27,6 +27,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.Test;
 
@@ -125,4 +128,45 @@ public class TestDFSRename {
       if (cluster != null) {cluster.shutdown();}
     }
   }
+
+  /**
+   * Check the blocks of dst file are cleaned after rename with overwrite
+   */
+  @Test(timeout = 120000)
+  public void testRenameWithOverwrite() throws Exception {
+    final short replFactor = 2;
+    final long blockSize = 512;
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(replFactor).build();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    try {
+
+      long fileLen = blockSize*3;
+      String src = "/foo/src";
+      String dst = "/foo/dst";
+      Path srcPath = new Path(src);
+      Path dstPath = new Path(dst);
+
+      DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
+      DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
+
+      LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
+          cluster.getNameNode(), dst, 0, fileLen);
+      BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).
+          getBlockManager();
+      assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+          getLocalBlock()) != null);
+      dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
+      assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+          getLocalBlock()) == null);
+    } finally {
+      if (dfs != null) {
+        dfs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
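For reference, the paired assertions in the new test reduce to the following helper-style check (a hypothetical extraction using names from the test above, not code from this commit): BlockManager#getStoredBlock returns null once a block is gone from the blocks map, so the test expects a non-null result before the rename and a null result after it, showing the overwritten dst's blocks were really collected.

    // Hypothetical helper equivalent to the inline assertions above.
    static boolean firstBlockStillStored(BlockManager bm, LocatedBlocks lbs) {
      // Non-null: the block is still registered in the NameNode's blocks
      // map. Null: it has been removed, i.e. collected for deletion.
      return bm.getStoredBlock(
          lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) != null;
    }

With the fix in place, this check flips from true to false across dfs.rename(srcPath, dstPath, Rename.OVERWRITE).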