[ 
https://issues.apache.org/jira/browse/HDFS-16813?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17622602#comment-17622602
 ] 

ASF GitHub Bot commented on HDFS-16813:
---------------------------------------

ZanderXu commented on code in PR #5063:
URL: https://github.com/apache/hadoop/pull/5063#discussion_r1002439362


##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java:
##########
@@ -108,32 +108,6 @@ void activate(Configuration conf) {
     Preconditions.checkArgument(intervalSecs >= 0, "Cannot set a negative " +
         "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);
 
-    int blocksPerInterval = conf.getInt(
-        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
-        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
-
-    final String deprecatedKey =
-        "dfs.namenode.decommission.nodes.per.interval";
-    final String strNodes = conf.get(deprecatedKey);
-    if (strNodes != null) {
-      LOG.warn("Deprecated configuration key {} will be ignored.",
-          deprecatedKey);
-      LOG.warn("Please update your configuration to use {} instead.",
-          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
-    }

Review Comment:
   Maybe we need to keep this check for the deprecated key 
`dfs.namenode.decommission.nodes.per.interval`.



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java:
##########
@@ -153,10 +127,7 @@ void activate(Configuration conf) {
         TimeUnit.SECONDS);
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Activating DatanodeAdminManager with interval {} seconds, " +
-              "{} max blocks per interval, " +
-              "{} max concurrently tracked nodes.", intervalSecs,
-          blocksPerInterval, maxConcurrentTrackedNodes);
+      LOG.debug("Activating DatanodeAdminManager with interval {} seconds.", 
intervalSecs);

Review Comment:
   Can remove `if (LOG.isDebugEnabled()) {`



##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorBase.java:
##########
@@ -123,6 +123,12 @@ public void setConf(Configuration conf) {
           DFSConfigKeys
               .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT;
     }
+
+    if (LOG.isDebugEnabled()) {

Review Comment:
   Can remove `if (LOG.isDebugEnabled()) {`.





> Remove parameter validation logic such as 
> dfs.namenode.decommission.blocks.per.interval in DatanodeAdminManager#activate
> ------------------------------------------------------------------------------------------------------------------------
>
>                 Key: HDFS-16813
>                 URL: https://issues.apache.org/jira/browse/HDFS-16813
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>            Reporter: Haiyang Hu
>            Assignee: Haiyang Hu
>            Priority: Major
>
> In DatanodeAdminManager#activate
> {code:java}
> int blocksPerInterval = conf.getInt(
>     DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
>     DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
> final String deprecatedKey =
>     "dfs.namenode.decommission.nodes.per.interval";
> final String strNodes = conf.get(deprecatedKey);
> if (strNodes != null) {
>   LOG.warn("Deprecated configuration key {} will be ignored.",
>       deprecatedKey);
>   LOG.warn("Please update your configuration to use {} instead.",
>       DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
> }
> checkArgument(blocksPerInterval > 0,
>     "Must set a positive value for "
>     + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
> final int maxConcurrentTrackedNodes = conf.getInt(
>     DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
>     DFSConfigKeys
>         .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
> checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " +
>     "value for "
>     + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
> {code}
> there is no need to verify the parameters
> dfs.namenode.decommission.blocks.per.interval and
> dfs.namenode.decommission.max.concurrent.tracked.nodes here.
> Because the parameters are processed in DatanodeAdminMonitorBase and 
> DatanodeAdminDefaultMonitor.



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org

Reply via email to