This is an automated email from the ASF dual-hosted git repository.
frankchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new c0f4c102571 Remove deprecation + Add forbiddenAPIs (#18603)
c0f4c102571 is described below
commit c0f4c1025713f397aa4dda667fe8971b3913d47c
Author: Virushade <[email protected]>
AuthorDate: Wed Oct 8 09:58:00 2025 +0800
Remove deprecation + Add forbiddenAPIs (#18603)
---
codestyle/druid-forbidden-apis.txt | 2 ++
.../java/org/apache/druid/storage/hdfs/HdfsDataSegmentPuller.java | 5 ++---
.../apache/druid/storage/hdfs/HdfsFileTimestampVersionFinder.java | 8 +++++---
.../java/org/apache/druid/storage/hdfs/tasklog/HdfsTaskLogs.java | 3 ++-
4 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/codestyle/druid-forbidden-apis.txt
b/codestyle/druid-forbidden-apis.txt
index 599e69e66fb..959be162693 100644
--- a/codestyle/druid-forbidden-apis.txt
+++ b/codestyle/druid-forbidden-apis.txt
@@ -60,6 +60,8 @@ org.apache.commons.io.FileUtils#forceMkdir(java.io.File) @
Use org.apache.druid.
org.apache.curator.shaded.** @ Use regular classes, not shaded versions
org.apache.datasketches.memory.Memory#wrap(byte[], int, int,
java.nio.ByteOrder) @ The implementation isn't correct in
datasketches-memory-2.2.0. Please refer to
https://github.com/apache/datasketches-memory/issues/178. Use wrap(byte[]) and
modify the offset by the callers instead
org.apache.druid.testing.simulate.EmbeddedMiddleManager#<init>() @ Middle
Managers should be used for local embedded tests only and not pushed as they
are slower and launch tasks as child processes. Use EmbeddedIndexer instead.
+org.apache.hadoop.fs.FileSystem#isDirectory(org.apache.hadoop.fs.Path) @ Use
fileSystem.getFileStatus(path)#isDirectory() instead.
+org.apache.hadoop.fs.FileSystem#isFile(org.apache.hadoop.fs.Path) @ Use
fileSystem.getFileStatus(path)#isFile() instead.
java.lang.Class#getCanonicalName() @ Class.getCanonicalName can return null
for anonymous types, use Class.getName instead.
java.util.concurrent.Executors#newFixedThreadPool(int) @ Executor is
non-daemon and can prevent JVM shutdown, use
org.apache.druid.java.util.common.concurrent.Execs#multiThreaded(int,
java.lang.String) instead.
diff --git
a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPuller.java
b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPuller.java
index 5dc6d810cbc..2c6de8e28bb 100644
---
a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPuller.java
+++
b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsDataSegmentPuller.java
@@ -194,10 +194,9 @@ public class HdfsDataSegmentPuller implements URIDataPuller
}
try {
final FileSystem fs = path.getFileSystem(config);
- if (fs.isDirectory(path)) {
+ if (fs.getFileStatus(path).isDirectory()) {
// -------- directory ---------
-
try {
return RetryUtils.retry(
() -> {
@@ -211,7 +210,7 @@ public class HdfsDataSegmentPuller implements URIDataPuller
final LocatedFileStatus child = children.next();
final Path childPath = child.getPath();
final String fname = childPath.getName();
- if (fs.isDirectory(childPath)) {
+ if (fs.getFileStatus(childPath).isDirectory()) {
log.warn("[%s] is a child directory, skipping",
childPath.toString());
} else {
final File outFile = new File(outDir, fname);
diff --git
a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsFileTimestampVersionFinder.java
b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsFileTimestampVersionFinder.java
index 73631822548..a484bb4a426 100644
---
a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsFileTimestampVersionFinder.java
+++
b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/HdfsFileTimestampVersionFinder.java
@@ -84,10 +84,13 @@ public class HdfsFileTimestampVersionFinder extends
HdfsDataSegmentPuller implem
return RetryUtils.retry(
() -> {
final FileSystem fs = path.getFileSystem(config);
- if (!fs.exists(path)) {
+
+ if (fs.exists(path)) {
+ FileStatus fileStatus = fs.getFileStatus(path);
+ return mostRecentInDir(fileStatus.isDirectory() ? path :
path.getParent(), pattern);
+ } else {
return null;
}
- return mostRecentInDir(fs.isDirectory(path) ? path :
path.getParent(), pattern);
},
shouldRetryPredicate(),
DEFAULT_RETRY_COUNT
@@ -97,5 +100,4 @@ public class HdfsFileTimestampVersionFinder extends
HdfsDataSegmentPuller implem
throw new RuntimeException(e);
}
}
-
}
diff --git
a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/tasklog/HdfsTaskLogs.java
b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/tasklog/HdfsTaskLogs.java
index 535598bc2d7..e0f563eb9d5 100644
---
a/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/tasklog/HdfsTaskLogs.java
+++
b/extensions-core/hdfs-storage/src/main/java/org/apache/druid/storage/hdfs/tasklog/HdfsTaskLogs.java
@@ -185,8 +185,9 @@ public class HdfsTaskLogs implements TaskLogs
Path taskLogDir = new Path(config.getDirectory());
FileSystem fs = taskLogDir.getFileSystem(hadoopConfig);
if (fs.exists(taskLogDir)) {
+ FileStatus taskLogFileStatus = fs.getFileStatus(taskLogDir);
- if (!fs.isDirectory(taskLogDir)) {
+ if (!taskLogFileStatus.isDirectory()) {
throw new IOE("taskLogDir [%s] must be a directory.", taskLogDir);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]