This is an automated email from the ASF dual-hosted git repository. surendralilhore pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push: new bf45f3b HDFS-14957. INodeReference Space Consumed was not same in QuotaUsage and ContentSummary. Contributed by hemanthboyina. bf45f3b is described below commit bf45f3b80a88ca6e6ab1289dc5b71d9d6e6f6c10 Author: Surendra Singh Lilhore <surendralilh...@apache.org> AuthorDate: Thu Jan 9 12:04:05 2020 +0530 HDFS-14957. INodeReference Space Consumed was not same in QuotaUsage and ContentSummary. Contributed by hemanthboyina. --- .../hadoop/hdfs/server/namenode/INodeFile.java | 42 ++++++++++++++++++++-- .../namenode/snapshot/TestRenameWithSnapshots.java | 26 +++++++++++++- 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index ce654b7..67c86b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -873,8 +873,18 @@ public class INodeFile extends INodeWithAdditionalFields counts.addContent(Content.FILE, 1); final long fileLen = computeFileSize(snapshotId); counts.addContent(Content.LENGTH, fileLen); - counts.addContent(Content.DISKSPACE, storagespaceConsumed(null) - .getStorageSpace()); + + FileWithSnapshotFeature sf = getFileWithSnapshotFeature(); + if (sf == null) { + counts.addContent(Content.DISKSPACE, + storagespaceConsumed(null).getStorageSpace()); + } else if (isStriped()) { + counts.addContent(Content.DISKSPACE, + storagespaceConsumedStriped().getStorageSpace()); + } else { + long diskSpaceQuota = getDiskSpaceQuota(counts, sf, snapshotId); + counts.addContent(Content.DISKSPACE, diskSpaceQuota); + } if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){ BlockStoragePolicy bsp = 
summary.getBlockStoragePolicySuite(). @@ -890,6 +900,34 @@ public class INodeFile extends INodeWithAdditionalFields return summary; } + /** + * Compute disk space consumed by all the blocks in snapshots. + */ + private long getDiskSpaceQuota(ContentCounts counts, + FileWithSnapshotFeature sf, int lastSnapshotId) { + FileDiffList fileDiffList = sf.getDiffs(); + int last = fileDiffList.getLastSnapshotId(); + + if (lastSnapshotId == Snapshot.CURRENT_STATE_ID + || last == Snapshot.CURRENT_STATE_ID) { + return storagespaceConsumed(null).getStorageSpace(); + } + + final long ssDeltaNoReplication; + short replication; + + if (last < lastSnapshotId) { + ssDeltaNoReplication = computeFileSize(true, false); + replication = getFileReplication(); + } else { + int sid = fileDiffList.getSnapshotById(lastSnapshotId); + ssDeltaNoReplication = computeFileSize(sid); + replication = getFileReplication(sid); + } + + return ssDeltaNoReplication * replication; + } + /** The same as computeFileSize(null). */ public final long computeFileSize() { return computeFileSize(CURRENT_STATE_ID); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index 128e3ba..fcc3c1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -2476,4 +2476,28 @@ public class TestRenameWithSnapshots { output.println(b); return b; } -} + + /** + * Test getContentsummary and getQuotausage for an INodeReference. 
+ */ + @Test(timeout = 300000) + public void testQuotaForRenameFileInSnapshot() throws Exception { + final Path snapshotDir = new Path("/testRenameWithSnapshot"); + hdfs.mkdirs(snapshotDir, new FsPermission((short) 0777)); + final Path file = new Path(snapshotDir, "file"); + DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED); + hdfs.allowSnapshot(snapshotDir); + hdfs.createSnapshot(snapshotDir, "s0"); + hdfs.mkdirs(new Path("/dir1")); + + // Truncate a file which exists in a snapshot, that is an + // INodeReference + hdfs.truncate(file, 10); + hdfs.rename(file, new Path("/dir1")); + assertEquals(hdfs.getContentSummary(new Path("/")).getSpaceConsumed(), + hdfs.getQuotaUsage(new Path("/")).getSpaceConsumed()); + assertEquals( + hdfs.getContentSummary(new Path("/")).getFileAndDirectoryCount(), + hdfs.getQuotaUsage(new Path("/")).getFileAndDirectoryCount()); + } +} \ No newline at end of file --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org