This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 14f5055b3a2 HDFS-17853. Support to make dfs.namenode.fs-limits.max-directory-items reconfigurable (#8064). Contributed by caozhiqiang
14f5055b3a2 is described below
commit 14f5055b3a2c9df7c73392cbceea1fe7eff2348d
Author: caozhiqiang <[email protected]>
AuthorDate: Thu Nov 27 23:47:48 2025 +0800
HDFS-17853. Support to make dfs.namenode.fs-limits.max-directory-items reconfigurable (#8064). Contributed by caozhiqiang
---
.../hadoop/hdfs/server/namenode/FSDirectory.java | 23 ++++++++++++++----
.../hadoop/hdfs/server/namenode/NameNode.java | 24 ++++++++++++++++++-
.../server/namenode/TestNameNodeReconfigure.java | 27 ++++++++++++++++++++++
.../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java | 8 ++++---
4 files changed, 73 insertions(+), 9 deletions(-)
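
With this change the directory-item limit can be adjusted on a running NameNode rather than only at startup. Below is a minimal sketch of driving the reconfiguration from Java with the stock DFSAdmin tool; the host:port is a placeholder, and it assumes dfs.namenode.fs-limits.max-directory-items has already been updated in hdfs-site.xml on the NameNode host:

    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class ReconfigureMaxDirItems {
      public static void main(String[] args) throws Exception {
        // "nn-host:9820" is a placeholder for the NameNode RPC address.
        // Kick off the background reconfiguration task on the NameNode.
        ToolRunner.run(new DFSAdmin(),
            new String[] {"-reconfig", "namenode", "nn-host:9820", "start"});
        // Poll for the task's outcome; repeat until it reports completion.
        ToolRunner.run(new DFSAdmin(),
            new String[] {"-reconfig", "namenode", "nn-host:9820", "status"});
      }
    }

The same commands are available from the shell as hdfs dfsadmin -reconfig namenode <host:port> start|status|properties.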
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index dd6e021a80d..a6bd57b4963 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -159,7 +159,7 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) {
private final FSNamesystem namesystem;
private volatile boolean skipQuotaCheck = false; //skip while consuming edits
private final int maxComponentLength;
- private final int maxDirItems;
+ private volatile int maxDirItems;
private final int lsLimit; // max list limit
private final int contentCountLimit; // max content summary counts per run
private final long contentSleepMicroSec;
@@ -217,6 +217,11 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) {
// authorizeWithContext() API or not.
private boolean useAuthorizationWithContextAPI = false;
+ // We need a maximum maximum because by default, PB limits message sizes
+ // to 64MB. This means we can only store approximately 6.7 million entries
+ // per directory, but let's use 6.4 million for some safety.
+ private static final int MAX_DIR_ITEMS = 64 * 100 * 1000;
+
public void setINodeAttributeProvider(
@Nullable INodeAttributeProvider provider) {
attributeProvider = provider;
@@ -395,10 +400,6 @@ public enum DirOp {
Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
"Cannot set a negative limit on the number of xattrs per inode (%s).",
DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
- // We need a maximum maximum because by default, PB limits message sizes
- // to 64MB. This means we can only store approximately 6.7 million entries
- // per directory, but let's use 6.4 million for some safety.
- final int MAX_DIR_ITEMS = 64 * 100 * 1000;
Preconditions.checkArgument(
maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set "
+ DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
@@ -580,6 +581,18 @@ String setProtectedDirectories(String protectedDirsString) {
return Joiner.on(",").skipNulls().join(protectedDirectories);
}
+ public void setMaxDirItems(int newVal) {
+ Preconditions.checkArgument(
+ newVal > 0 && newVal <= MAX_DIR_ITEMS, "Cannot set "
+ + DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
+ + " to a value less than 1 or greater than " + MAX_DIR_ITEMS);
+ maxDirItems = newVal;
+ }
+
+ public int getMaxDirItems() {
+ return maxDirItems;
+ }
+
BlockManager getBlockManager() {
return getFSNamesystem().getBlockManager();
}
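
In FSDirectory the field changes from final to volatile so RPC handler threads always read the latest limit without taking a lock, while the old constructor-time bounds check moves into the new setter. A standalone sketch of that pattern, with illustrative names rather than the actual Hadoop classes:

    // Validated, volatile limit: lock-free reads, range-checked writes.
    public class ReconfigurableLimit {
      // Upper bound mirrors MAX_DIR_ITEMS: protobuf's default 64 MB message
      // cap fits roughly 6.7 million directory entries per listing, so
      // 6.4 million leaves headroom.
      private static final int MAX = 64 * 100 * 1000;

      // volatile: readers see the newest value without synchronization.
      private volatile int limit = 1024 * 1024;

      public void set(int newVal) {
        if (newVal <= 0 || newVal > MAX) {
          throw new IllegalArgumentException(
              "limit must be in [1, " + MAX + "], got " + newVal);
        }
        limit = newVal;
      }

      public int get() {
        return limit;
      }
    }

A single volatile int suffices here because each update is one independent write; no invariant spans multiple fields.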
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 7f1ed204163..98192cd66d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -143,6 +143,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_DEFAULT;
@@ -385,7 +387,8 @@ public enum OperationCategory {
DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY,
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
- DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY));
+ DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY,
+ DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY));
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2388,6 +2391,8 @@ protected String reconfigurePropertyImpl(String property, String newVal)
|| property.equals(DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY)
|| property.equals(DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY)) {
return reconfigureFSNamesystemLockMetricsParameters(property, newVal);
+ } else if (property.equals(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY)) {
+ return reconfigureMaxDirItems(newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@@ -2806,6 +2811,23 @@ private String reconfigureFSNamesystemLockMetricsParameters(final String propert
}
}
+ private String reconfigureMaxDirItems(String newVal) throws ReconfigurationException {
+ int newSetting;
+ namesystem.writeLock(RwLockMode.BM);
+ try {
+ getNamesystem().getFSDirectory()
+ .setMaxDirItems(adjustNewVal(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT, newVal));
+ newSetting = getNamesystem().getFSDirectory().getMaxDirItems();
+ LOG.info("RECONFIGURE* changed {} to {}", DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, newSetting);
+ return String.valueOf(newSetting);
+ } catch (IllegalArgumentException e) {
+ throw new ReconfigurationException(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, newVal,
+ getConf().get(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY), e);
+ } finally {
+ namesystem.writeUnlock(RwLockMode.BM, "reconfigureMaxDirItems");
+ }
+ }
+
@Override // ReconfigurableBase
protected Configuration getNewConf() {
return new HdfsConfiguration();
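
reconfigureMaxDirItems funnels the raw string through NameNode's existing adjustNewVal helper before calling the setter. A sketch of the assumed contract (the real helper lives in NameNode.java; this standalone version is only illustrative):

    // Assumed behaviour: a null new value (property being unset) falls back
    // to the configured default; anything else must parse as an int.
    static int adjustNewVal(int defaultVal, String newVal) {
      return (newVal == null) ? defaultVal : Integer.parseInt(newVal);
    }

Since NumberFormatException extends IllegalArgumentException, a non-numeric string takes the same catch path as an out-of-range value and is wrapped into a ReconfigurationException.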
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index d45bdb20730..d60d76b6dab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -39,6 +39,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -865,6 +866,32 @@ public void testReconfigureSlowPeerCollectInterval() throws Exception {
assertEquals(600000, datanodeManager.getSlowPeerCollectionInterval());
}
+ @Test
+ public void testReconfigureMaxDirItems() throws Exception {
+ final NameNode nameNode = cluster.getNameNode();
+ final FSDirectory fsd = nameNode.namesystem.getFSDirectory();
+
+ // By default, DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is 1024 * 1024.
+ assertEquals(1024 * 1024, fsd.getMaxDirItems());
+
+ // Reconfigure.
+ nameNode.reconfigureProperty(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
+ Integer.toString(1024 * 1024 * 2));
+
+ // Assert DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is now 1024 * 1024 * 2.
+ assertEquals(1024 * 1024 * 2, fsd.getMaxDirItems());
+
+ // Reconfigure to a negative value and expect failure.
+ LambdaTestUtils.intercept(ReconfigurationException.class,
+ "Could not change property dfs.namenode.fs-limits.max-directory-items
from '"
+ + 1024 * 1024 * 2 + "' to '" + 1024 * 1024 * -1 + "'",
+ () -> nameNode.reconfigureProperty(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
+ Integer.toString(1024 * 1024 * -1)));
+
+ // Assert DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is still 1024 * 1024 * 2.
+ assertEquals(1024 * 1024 * 2, fsd.getMaxDirItems());
+ }
+
@AfterEach
public void shutDown() throws IOException {
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 3ff17b19226..69d1eade5a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -105,6 +105,7 @@
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY;
import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -449,7 +450,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
- assertEquals(29, outs.size());
+ assertEquals(30, outs.size());
assertTrue(outs.get(0).contains("Reconfigurable properties:"));
assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -463,8 +464,9 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_MIN_BLOCKS_FOR_WRITE_KEY, outs.get(10));
assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, outs.get(11));
assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(12));
- assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(13));
- assertEquals(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, outs.get(14));
+ assertEquals(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, outs.get(13));
+ assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(14));
+ assertEquals(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, outs.get(15));
assertEquals(errs.size(), 0);
}