This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 4f79df4da12d02b66cbd9aae77bc39299025dd6a
Author: imbajin <imba...@users.noreply.github.com>
AuthorDate: Fri Sep 11 15:44:03 2020 +0800

    HDFS-15551. Tiny Improve for DeadNode detector (#2265)
    
    Contributed by imbajin.
    
    Reviewed-by: leosunli <lisheng.su...@gmail.com>
    Signed-off-by: He Xiaoqiao <hexiaoq...@apache.org>
    (cherry picked from commit 89428f142fe7cee17bd1a0f5f207b6952ec79d32)
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java  |  3 +++
 .../main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java    | 11 ++++++-----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 0a7fc8f..a918101 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -181,10 +181,13 @@ public class DFSInputStream extends FSInputStream
   private byte[] oneByteBuf; // used for 'int read()'
 
   protected void addToLocalDeadNodes(DatanodeInfo dnInfo) {
+    DFSClient.LOG.debug("Add {} to local dead nodes, previously was {}.",
+            dnInfo, deadNodes);
     deadNodes.put(dnInfo, dnInfo);
   }
 
   protected void removeFromLocalDeadNodes(DatanodeInfo dnInfo) {
+    DFSClient.LOG.debug("Remove {} from local dead nodes.", dnInfo);
     deadNodes.remove(dnInfo);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java
index 8066b8f..112bc04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DeadNodeDetector.java
@@ -294,7 +294,7 @@ public class DeadNodeDetector extends Daemon {
   }
 
   /**
-   * Prode datanode by probe byte.
+   * Probe datanode by probe type.
    */
   private void scheduleProbe(ProbeType type) {
     LOG.debug("Schedule probe datanode for probe type: {}.", type);
@@ -376,9 +376,8 @@ public class DeadNodeDetector extends Daemon {
       } catch (Exception e) {
         LOG.error("Probe failed, datanode: {}, type: {}.", datanodeInfo, type,
             e);
+        deadNodeDetector.probeCallBack(this, false);
       }
-
-      deadNodeDetector.probeCallBack(this, false);
     }
   }
 
@@ -402,7 +401,7 @@ public class DeadNodeDetector extends Daemon {
       }
     } else {
       if (probe.getType() == ProbeType.CHECK_SUSPECT) {
-        LOG.info("Add the node to dead node list: {}.",
+        LOG.warn("Probe failed, add suspect node to dead node list: {}.",
             probe.getDatanodeInfo());
         addToDead(probe.getDatanodeInfo());
       }
@@ -415,11 +414,12 @@ public class DeadNodeDetector extends Daemon {
   private void checkDeadNodes() {
     Set<DatanodeInfo> datanodeInfos = clearAndGetDetectedDeadNodes();
     for (DatanodeInfo datanodeInfo : datanodeInfos) {
-      LOG.debug("Add dead node to check: {}.", datanodeInfo);
       if (!deadNodesProbeQueue.offer(datanodeInfo)) {
         LOG.debug("Skip to add dead node {} to check " +
                 "since the probe queue is full.", datanodeInfo);
         break;
+      } else {
+        LOG.debug("Add dead node to check: {}.", datanodeInfo);
       }
     }
     state = State.IDLE;
@@ -475,6 +475,7 @@ public class DeadNodeDetector extends Daemon {
       datanodeInfos.add(datanodeInfo);
     }
 
+    LOG.debug("Add datanode {} to suspectAndDeadNodes.", datanodeInfo);
     addSuspectNodeToDetect(datanodeInfo);
   }
 

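For context, the logging added above sits behind DEBUG level on the DFSClient and DeadNodeDetector loggers (standard slf4j loggers named after those classes), so it only appears once client-side DEBUG logging is enabled. The sketch below is not part of this patch; it is a minimal, hedged illustration of a client that turns on the shared dead node detector whose probe and logging paths this commit touches. The configuration key name, NameNode address, and file path are assumptions for illustration only.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeadNodeDetectionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed key name: enables the client-wide DeadNodeDetector so that
        // suspect/dead datanodes are probed and shared across input streams
        // instead of being tracked only in each stream's local dead node set.
        conf.setBoolean("dfs.client.deadnode.detection.enabled", true);

        // hdfs://nn1:8020 and /tmp/sample.txt are placeholders.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://nn1:8020"), conf);
             FSDataInputStream in = fs.open(new Path("/tmp/sample.txt"))) {
          byte[] buf = new byte[4096];
          // A read failure against a datanode is what ultimately drives the
          // addToLocalDeadNodes / addSuspectNodeToDetect paths logged above.
          int n = in.read(buf);
          System.out.println("Read " + n + " bytes");
        }
      }
    }

With DEBUG enabled for org.apache.hadoop.hdfs.DFSClient and org.apache.hadoop.hdfs.DeadNodeDetector, the messages introduced by this change trace when a node enters the local dead set, when a dead node is queued (or skipped) for re-probing, and when a probe failure promotes a suspect node to dead.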