Author: wang
Date: Wed Aug 20 18:19:13 2014
New Revision: 1619194
URL: http://svn.apache.org/r1619194
Log:
Merge from trunk to branch
Modified:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
(props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
(props changed)
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
Propchange:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1618998-1619192
Modified:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619194&r1=1619193&r2=1619194&view=diff
==============================================================================
---
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
(original)
+++
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Wed Aug 20 18:19:13 2014
@@ -520,6 +520,12 @@ Release 2.6.0 - UNRELEASED
HDFS-6569. OOB message can't be sent to the client when DataNode shuts
down for upgrade
(brandonli)
+    HDFS-6868. portmap and nfs3 are documented as hadoop commands instead of
+    hdfs (brandonli)
+
+ HDFS-6870. Blocks and INodes could leak for Rename with overwrite flag. (Yi
+ Liu via jing9)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
Propchange:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1618998-1619192
Modified:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1619194&r1=1619193&r2=1619194&view=diff
==============================================================================
---
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
(original)
+++
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
Wed Aug 20 18:19:13 2014
@@ -666,15 +666,20 @@ public class FSDirectory implements Clos
tx.updateMtimeAndLease(timestamp);
// Collect the blocks and remove the lease for previous dst
- long filesDeleted = -1;
+ boolean filesDeleted = false;
if (removedDst != null) {
undoRemoveDst = false;
if (removedNum > 0) {
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<INode>();
- filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
- dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
- true).get(Quota.NAMESPACE);
+ if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
+        removedDst.destroyAndCollectBlocks(collectedBlocks, removedINodes);
+ filesDeleted = true;
+ } else {
+ filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
+ dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
+ true).get(Quota.NAMESPACE) >= 0;
+ }
getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
removedINodes, false);
}
@@ -687,7 +692,7 @@ public class FSDirectory implements Clos
}
tx.updateQuotasInSourceTree();
- return filesDeleted >= 0;
+ return filesDeleted;
}
} finally {
if (undoRemoveSrc) {
Modified:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1619194&r1=1619193&r2=1619194&view=diff
==============================================================================
---
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
(original)
+++
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
Wed Aug 20 18:19:13 2014
@@ -209,7 +209,7 @@ HDFS NFS Gateway
[[2]] Start package included portmap (needs root privileges):
-------------------------
- hadoop portmap
+ hdfs portmap
OR
@@ -224,7 +224,7 @@ HDFS NFS Gateway
as long as the user has read access to the Kerberos keytab defined in
"nfs.keytab.file".
-------------------------
- hadoop nfs3
+ hdfs nfs3
OR
Modified:
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1619194&r1=1619193&r2=1619194&view=diff
==============================================================================
---
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
(original)
+++
hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
Wed Aug 20 18:19:13 2014
@@ -27,6 +27,9 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.junit.Test;
@@ -125,4 +128,45 @@ public class TestDFSRename {
if (cluster != null) {cluster.shutdown();}
}
}
+
+ /**
+ * Check the blocks of dst file are cleaned after rename with overwrite
+ */
+ @Test(timeout = 120000)
+ public void testRenameWithOverwrite() throws Exception {
+ final short replFactor = 2;
+ final long blockSize = 512;
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+ numDataNodes(replFactor).build();
+ DistributedFileSystem dfs = cluster.getFileSystem();
+ try {
+
+ long fileLen = blockSize*3;
+ String src = "/foo/src";
+ String dst = "/foo/dst";
+ Path srcPath = new Path(src);
+ Path dstPath = new Path(dst);
+
+ DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
+ DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
+
+ LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
+ cluster.getNameNode(), dst, 0, fileLen);
+ BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).
+ getBlockManager();
+ assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+ getLocalBlock()) != null);
+ dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
+ assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+ getLocalBlock()) == null);
+ } finally {
+ if (dfs != null) {
+ dfs.close();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}