[ 
https://issues.apache.org/jira/browse/HDFS-17033?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17740922#comment-17740922
 ] 

ASF GitHub Bot commented on HDFS-17033:
---------------------------------------

Hexiaoqiao commented on code in PR #5815:
URL: https://github.com/apache/hadoop/pull/5815#discussion_r1255363924


##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java:
##########
@@ -1681,6 +1682,66 @@ public Boolean get() {
     assertFalse(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
   }
 
+  /**
+   * Test for blockIdCK with datanode staleness.
+   */
+  @Test
+  public void testBlockIdCKStaleness() throws Exception {
+    final short replFactor = 1;
+    short numDn = 1;
+    final long blockSize = 512;
+    Configuration conf = new Configuration();
+
+    // Shorten dfs.namenode.stale.datanode.interval for easier testing.
+    conf.set(DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, String.valueOf(5000));
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
+
+    String[] racks = {"/rack1", "/rack2"};
+    String[] hosts = {"host1", "host2"};
+
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
+    assertNotNull("Failed Cluster Creation", cluster);
+    cluster.waitClusterUp();
+    FileSystem fs = FileSystem.get(conf);
+    assertNotNull("Failed to get FileSystem", fs);
+
+    try {
+      DFSTestUtil util = new DFSTestUtil.Builder().
+          setName(getClass().getSimpleName()).setNumFiles(1).build();
+
+      // Create one file.
+      final String pathString = new String("/testfile");
+      final Path path = new Path(pathString);
+      util.createFile(fs, path, 1024L, replFactor, 1000L);
+      util.waitReplication(fs, path, replFactor);
+      StringBuilder sb = new StringBuilder();
+      for (LocatedBlock lb: util.getAllBlocks(fs, path)){
+        sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
+      }
+      String[] bIds = sb.toString().split(" ");
+
+      // Make sure datanode is HEALTHY before down.
+      String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+      // Make the block on datanode go into stale.
+      cluster.stopDataNode(0);
+      Thread.sleep(7000);

Review Comment:
   Please try to use `GenericTestUtils.waitFor` instead of `Thread.sleep`.



##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java:
##########
@@ -1681,6 +1682,66 @@ public Boolean get() {
     assertFalse(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
   }
 
+  /**
+   * Test for blockIdCK with datanode staleness.
+   */
+  @Test
+  public void testBlockIdCKStaleness() throws Exception {
+    final short replFactor = 1;
+    short numDn = 1;
+    final long blockSize = 512;
+    Configuration conf = new Configuration();
+
+    // Shorten dfs.namenode.stale.datanode.interval for easier testing.
+    conf.set(DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, String.valueOf(5000));

Review Comment:
   It should be `setLong` directly here.





> Update fsck to display stale state info of blocks accurately
> ------------------------------------------------------------
>
>                 Key: HDFS-17033
>                 URL: https://issues.apache.org/jira/browse/HDFS-17033
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>          Components: datanode, namenode
>            Reporter: WangYuanben
>            Assignee: WangYuanben
>            Priority: Minor
>              Labels: pull-request-available
>
> When the DN is stale, Block replica on this DN should be "STALE" instead of 
> "HEALTHY" in block check of fsck.



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org

Reply via email to