This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 0c9e0b4398a2 HDFS-17456. Fix the incorrect dfsused statistics of datanode when appending a file. (#6713). Contributed by fuchaohong.
0c9e0b4398a2 is described below

commit 0c9e0b4398a215087fc32e9743d4584d52a1708d
Author: fuchaohong <1783129...@qq.com>
AuthorDate: Tue Apr 30 12:22:53 2024 +0800

    HDFS-17456. Fix the incorrect dfsused statistics of datanode when appending a file. (#6713). Contributed by fuchaohong.
    
    Reviewed-by: ZanderXu <zande...@apache.org>
    Signed-off-by: He Xiaoqiao <hexiaoq...@apache.org>
---
 .../datanode/fsdataset/impl/FsVolumeImpl.java      |  2 ++
 .../datanode/fsdataset/impl/TestFsDatasetImpl.java | 42 ++++++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 47f0a3556aad..6b026823f19f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -1291,7 +1291,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
     // rename meta file to rbw directory
     // rename block file to rbw directory
+    long oldReplicaLength = replicaInfo.getNumBytes() + replicaInfo.getMetadataLength();
     newReplicaInfo.moveReplicaFrom(replicaInfo, newBlkFile);
+    getBlockPoolSlice(bpid).decDfsUsed(oldReplicaLength);
 
     reserveSpaceForReplica(bytesReserved);
     return newReplicaInfo;
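
For readers skimming the patch: moveReplicaFrom moves a finalized replica's block and meta files into the rbw directory so the block can be reopened for append, but before this change nothing subtracted the old copy's bytes from the block pool's dfsUsed, so those bytes were counted a second time once the appended replica was finalized. A minimal, self-contained sketch of that double-counting (illustrative names only, not HDFS code):

    // Toy model of BlockPoolSlice-style dfsUsed accounting; all names here
    // are hypothetical and exist only to illustrate the bug fixed above.
    public class DfsUsedAccountingSketch {
      static long dfsUsed = 0;

      // Finalizing a replica adds its block + meta bytes to dfsUsed.
      static void finalizeReplica(long blockBytes, long metaBytes) {
        dfsUsed += blockBytes + metaBytes;
      }

      // Reopening a finalized replica for append must subtract the old
      // copy's bytes (the decDfsUsed call added in this hunk); otherwise
      // they are counted twice when the replica is finalized again.
      static void appendStart(long oldBlockBytes, long oldMetaBytes) {
        dfsUsed -= oldBlockBytes + oldMetaBytes;
      }

      public static void main(String[] args) {
        finalizeReplica(100, 11);    // initial file: 100 data bytes + 11 meta bytes
        appendStart(100, 11);        // reopen for append (the fixed path)
        finalizeReplica(200, 11);    // finalized again after appending 100 bytes
        System.out.println(dfsUsed); // 211; without appendStart it would be 322
      }
    }
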
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 5468473d9de0..dd85ab6328c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -2102,4 +2102,46 @@ public class TestFsDatasetImpl {
       DataNodeFaultInjector.set(oldDnInjector);
     }
   }
+
+  @Test(timeout = 30000)
+  public void testAppend() {
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
+          .storagesPerDatanode(2)
+          .build();
+      FileSystem fs = cluster.getFileSystem();
+      DataNode dataNode = cluster.getDataNodes().get(0);
+
+      // Create test file
+      Path filePath = new Path("testData");
+      FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset();
+      DFSTestUtil.createFile(fs, filePath, 100, (short) 1, 0);
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
+      ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      long oldMetaLength = replicaInfo.getMetadataLength();
+      long oldDfsUsed = fsDataSetImpl.getDfsUsed();
+
+      // Append to file
+      int appendLength = 100;
+      DFSTestUtil.appendFile(fs, filePath, appendLength);
+
+      block = DFSTestUtil.getFirstBlock(fs, filePath);
+      replicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      long newMetaLength = replicaInfo.getMetadataLength();
+      long newDfsUsed = fsDataSetImpl.getDfsUsed();
+
+      assert newDfsUsed == oldDfsUsed + appendLength + (newMetaLength - oldMetaLength) :
+          "When appending a file, the dfsused statistics of datanode are incorrect.";
+    } catch (Exception ex) {
+      LOG.info("Exception in testAppend ", ex);
+      fail("Exception while testing testAppend ");
+    } finally {
+      if (cluster != null && cluster.isClusterUp()) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
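
As a rough worked example of the invariant the new test asserts, newDfsUsed == oldDfsUsed + appendLength + (newMetaLength - oldMetaLength): with HDFS defaults the meta file is a small header plus one CRC per checksum chunk, so a 100-byte append that stays within the first chunk should grow dfsUsed by exactly 100. The chunk size (512 bytes), CRC width (4 bytes), and header size (7 bytes) below are stated as assumptions about the defaults, not values read from the test:

    // Hypothetical helper mirroring the test's arithmetic; not an HDFS API.
    public class AppendDfsUsedExample {
      // Assumed meta layout: 7-byte header + one 4-byte CRC per 512-byte chunk.
      static long metaLength(long blockBytes) {
        long chunks = (blockBytes + 511) / 512;  // ceiling division
        return 7 + 4 * chunks;
      }

      public static void main(String[] args) {
        long oldBlock = 100, appendLength = 100;
        long oldMeta = metaLength(oldBlock);                 // 11
        long newMeta = metaLength(oldBlock + appendLength);  // 11, still one chunk
        // Expected growth of dfsUsed under the test's invariant:
        System.out.println(appendLength + (newMeta - oldMeta)); // 100
      }
    }
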

