This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 5b81caf0cf4 HDFS-17137. Standby/Observer NameNode skip to handle redundant replica block logic when set decrease replication. (#5913). Contributed by Haiyang Hu.
5b81caf0cf4 is described below

commit 5b81caf0cf44a532b24387aad2630d92155ce7c4
Author: huhaiyang <huhaiyang...@126.com>
AuthorDate: Tue Aug 8 15:42:23 2023 +0800

    HDFS-17137. Standby/Observer NameNode skip to handle redundant replica block logic when set decrease replication. (#5913). Contributed by Haiyang Hu.
    
    Reviewed-by: Tao Li <toms...@apache.org>
    Signed-off-by: He Xiaoqiao <hexiaoq...@apache.org>
---
 .../hdfs/server/blockmanagement/BlockManager.java  |  5 ++
 .../namenode/ha/TestStandbyBlockManagement.java    | 54 ++++++++++++++++++++++
 2 files changed, 59 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index d00bface655..eb960e62e36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4030,6 +4030,11 @@ public class BlockManager implements BlockStatsMXBean {
 
     // update neededReconstruction priority queues
     b.setReplication(newRepl);
+
+    // Process the block only when active NN is out of safe mode.
+    if (!isPopulatingReplQueues()) {
+      return;
+    }
     NumberReplicas num = countNodes(b);
     updateNeededReconstructions(b, 0, newRepl - oldRepl);
     if (shouldProcessExtraRedundancy(num, newRepl)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java
index 74c6f212408..4ddbbaa10f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java
@@ -97,4 +97,58 @@ public class TestStandbyBlockManagement {
     }
   }
 
+  /**
+   * Test that the Standby/Observer NameNode does not handle redundant replica
+   * block logic when the replication factor is decreased.
+   * @throws Exception
+   */
+  @Test(timeout = 60000)
+  public void testNotHandleRedundantReplica() throws Exception {
+    Configuration conf = new Configuration();
+    HAUtil.setAllowStandbyReads(conf, true);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+
+    // Create HA Cluster.
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(4).build()) {
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      NameNode nn1 = cluster.getNameNode(0);
+      assertEquals("ACTIVE", nn1.getNamesystem().getState().name());
+      NameNode nn2 = cluster.getNameNode(1);
+      assertEquals("STANDBY", nn2.getNamesystem().getState().name());
+
+      cluster.triggerHeartbeats();
+      // Send the full block reports (FBR).
+      cluster.triggerBlockReports();
+
+      // By default, the excessRedundancyMap size is 0.
+      assertEquals(0, nn1.getNamesystem().getBlockManager().getExcessBlocksCount());
+      assertEquals(0, nn2.getNamesystem().getBlockManager().getExcessBlocksCount());
+
+      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+      // Create test file.
+      Path file = new Path("/test");
+      long fileLength = 512;
+      DFSTestUtil.createFile(fs, file, fileLength, (short) 4, 0L);
+      DFSTestUtil.waitReplication(fs, file, (short) 4);
+
+      // Decrease the replication factor to 3.
+      fs.setReplication(file, (short) 3);
+      HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
+
+      // Make sure the DN has deleted the excess replica and reported it to the NNs.
+      cluster.triggerHeartbeats();
+      HATestUtil.waitForDNDeletions(cluster);
+      cluster.triggerDeletionReports();
+
+      DFSTestUtil.waitReplication(fs, file, (short) 3);
+
+      // After the excess replica is deleted, the excessRedundancyMap size on both
+      // the active and standby NN is 0.
+      assertEquals(0, nn1.getNamesystem().getBlockManager().getExcessBlocksCount());
+      assertEquals(0, nn2.getNamesystem().getBlockManager().getExcessBlocksCount());
+    }
+  }
 }
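
For readers following the change outside the diff context, the sketch below is a minimal, self-contained illustration of the early-return guard this commit adds to BlockManager#setReplication: the new replication factor is always recorded, but counting replicas and queuing excess ones is skipped unless the NameNode is populating its replication queues (i.e. it is the active NN and out of safe mode), so a Standby/Observer NameNode no longer adds entries to its excessRedundancyMap. The class name, fields, and return strings below are invented for illustration only; they are not the real BlockManager API.

    // Minimal sketch (NOT Hadoop code) of the guard added in this commit.
    public class SetReplicationSketch {

      // Stand-in for BlockManager#isPopulatingReplQueues(): true only on the
      // active NameNode once it has left safe mode.
      private final boolean populatingReplQueues;

      public SetReplicationSketch(boolean populatingReplQueues) {
        this.populatingReplQueues = populatingReplQueues;
      }

      public String setReplication(short oldRepl, short newRepl) {
        // The new replication factor is recorded unconditionally,
        // mirroring b.setReplication(newRepl) in the hunk above.

        // Process the block only when the active NN is out of safe mode.
        if (!populatingReplQueues) {
          // Standby/Observer (or active NN still in safe mode): stop here, so
          // nothing is ever added to this NameNode's excessRedundancyMap.
          return "replication updated only";
        }

        // Active NN out of safe mode: continue with countNodes(),
        // updateNeededReconstructions() and extra-redundancy processing.
        return "replication updated and extra redundancy processed";
      }

      public static void main(String[] args) {
        System.out.println(new SetReplicationSketch(true).setReplication((short) 4, (short) 3));
        System.out.println(new SetReplicationSketch(false).setReplication((short) 4, (short) 3));
      }
    }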

