This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 1498a8685d50 HDFS-17282. Reconfig 'SlowIoWarningThreshold' parameters for datanode. (#6338). Contributed by huangzhaobo99
1498a8685d50 is described below

commit 1498a8685d504baeeeeb97bf4a7c71daffba375e
Author: huangzhaobo <huangzhaob...@126.com>
AuthorDate: Thu Dec 14 14:17:53 2023 +0800

    HDFS-17282. Reconfig 'SlowIoWarningThreshold' parameters for datanode. (#6338). Contributed by huangzhaobo99
    
    Reviewed-by: Haiyang Hu <haiyang...@shopee.com>
    Reviewed-by: Tao Li <toms...@apache.org>
    Signed-off-by: Ayush Saxena <ayushsax...@apache.org>
---
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |  9 +++++-
 .../hadoop/hdfs/server/datanode/DataNode.java      | 25 ++++++++++++++++-
 .../datanode/TestDataNodeReconfiguration.java      | 32 ++++++++++++++++++++++
 .../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java |  2 +-
 4 files changed, 65 insertions(+), 3 deletions(-)
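
For operators, the effect of this change is that dfs.datanode.slow.io.warning.threshold.ms can now be updated on a live DataNode without a restart. A minimal sketch of the flow, assuming a hypothetical DataNode at dn1.example.com with the default IPC port 9867 (host and port are placeholders, not part of this commit):

    # Update dfs.datanode.slow.io.warning.threshold.ms in the DataNode's hdfs-site.xml, then:
    hdfs dfsadmin -reconfig datanode dn1.example.com:9867 start
    # Poll until the background reconfiguration task reports completion:
    hdfs dfsadmin -reconfig datanode dn1.example.com:9867 status
    # List the properties this DataNode accepts for runtime reconfiguration:
    hdfs dfsadmin -reconfig datanode dn1.example.com:9867 properties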

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index be36ca70fe44..21b92db3073a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -37,6 +37,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PMEM_CACHE_RECOV
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PMEM_CACHE_RECOVERY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PROCESS_COMMANDS_THRESHOLD_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PROCESS_COMMANDS_THRESHOLD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
@@ -114,7 +115,7 @@ public class DNConf {
   final long ibrInterval;
   volatile long initialBlockReportDelayMs;
   volatile long cacheReportInterval;
-  final long datanodeSlowIoWarningThresholdMs;
+  private volatile long datanodeSlowIoWarningThresholdMs;
 
   final String minimumNameNodeVersion;
   final String encryptionAlgorithm;
@@ -522,4 +523,10 @@ public class DNConf {
         DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
   }
+
+  public void setDatanodeSlowIoWarningThresholdMs(long threshold) {
+    Preconditions.checkArgument(threshold > 0,
+        DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY + " should be greater than 0");
+    datanodeSlowIoWarningThresholdMs = threshold;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 8fb009dab850..9f73b1cd3c35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -80,6 +80,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THR
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
@@ -371,7 +373,8 @@ public class DataNode extends ReconfigurableBase
               DFS_DISK_BALANCER_PLAN_VALID_INTERVAL,
               DFS_DATANODE_DATA_TRANSFER_BANDWIDTHPERSEC_KEY,
               DFS_DATANODE_DATA_WRITE_BANDWIDTHPERSEC_KEY,
-              DFS_DATANODE_DATA_READ_BANDWIDTHPERSEC_KEY));
+              DFS_DATANODE_DATA_READ_BANDWIDTHPERSEC_KEY,
+              DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY));
 
   public static final String METRICS_LOG_NAME = "DataNodeMetricsLog";
 
@@ -735,6 +738,8 @@ public class DataNode extends ReconfigurableBase
     case DFS_DISK_BALANCER_ENABLED:
     case DFS_DISK_BALANCER_PLAN_VALID_INTERVAL:
       return reconfDiskBalancerParameters(property, newVal);
+    case DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY:
+      return reconfSlowIoWarningThresholdParameters(property, newVal);
     default:
       break;
     }
@@ -1056,6 +1061,24 @@ public class DataNode extends ReconfigurableBase
     }
   }
 
+  private String reconfSlowIoWarningThresholdParameters(String property, String newVal)
+      throws ReconfigurationException {
+    String result;
+    try {
+      LOG.info("Reconfiguring {} to {}", property, newVal);
+      Preconditions.checkNotNull(dnConf, "DNConf has not been initialized.");
+      long slowIoWarningThreshold = (newVal == null ?
+          DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT :
+          Long.parseLong(newVal));
+      result = Long.toString(slowIoWarningThreshold);
+      dnConf.setDatanodeSlowIoWarningThresholdMs(slowIoWarningThreshold);
+      LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
+      return result;
+    } catch (IllegalArgumentException e) {
+      throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+    }
+  }
+
   /**
    * Get a list of the keys of the re-configurable properties in configuration.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index 14c1c301b1bd..69f0565f2b6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -49,6 +49,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_OUTLIER_DETE
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_ENABLED;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_PLAN_VALID_INTERVAL;
@@ -916,4 +918,34 @@ public class TestDataNodeReconfiguration {
       assertEquals(60000, dn.getDiskBalancer().getPlanValidityIntervalInConfig());
     }
   }
+
+  @Test
+  public void testSlowIoWarningThresholdReconfiguration() throws Exception {
+    int slowIoWarningThreshold = 500;
+    for (int i = 0; i < NUM_DATA_NODE; i++) {
+      DataNode dn = cluster.getDataNodes().get(i);
+
+      // Verify DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY.
+      // Try invalid values.
+      LambdaTestUtils.intercept(ReconfigurationException.class,
+          "Could not change property dfs.datanode.slow.io.warning.threshold.ms from "
+              + "'300' to 'text'",
+          () -> dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, "text"));
+      LambdaTestUtils.intercept(ReconfigurationException.class,
+          "Could not change property dfs.datanode.slow.io.warning.threshold.ms from "
+              + "'300' to '-1'",
+          () -> dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, "-1"));
+
+      // Set value is 500.
+      dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
+          String.valueOf(slowIoWarningThreshold));
+      assertEquals(slowIoWarningThreshold, dn.getDnConf().getSlowIoWarningThresholdMs());
+
+      // Set default value.
+      dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, null);
+      assertEquals(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT,
+          dn.getDnConf().getSlowIoWarningThresholdMs());
+    }
+  }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 2a3fba83ffcb..4c7268e68e7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -347,7 +347,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("datanode", address, outs, errs);
-    assertEquals(25, outs.size());
+    assertEquals(26, outs.size());
     assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, outs.get(1));
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
