This is an automated email from the ASF dual-hosted git repository.
okumin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new f75c18b57ae HIVE-29117: Refactor deleteDir and moveToTrash (#6006)
f75c18b57ae is described below
commit f75c18b57ae4218db689d1e76fabb1343fd78973
Author: Wechar Yu <[email protected]>
AuthorDate: Wed Oct 8 15:39:09 2025 +0800
HIVE-29117: Refactor deleteDir and moveToTrash (#6006)
* HIVE-29117: Refactor deleteDir and moveToTrash
* fix compile and remove unnecessary MetaStoreFS interface
* return void in deleteDir()
* fix sonar report warnings
* Revert "return void in deleteDir()"
This reverts commit 7ad708b85de03d8cca2206600ae0e58df0ddee55.
---
.../org/apache/hadoop/hive/common/FileUtils.java | 36 -------------
.../java/org/apache/hadoop/hive/conf/HiveConf.java | 5 --
.../storage/archive/AlterTableArchiveUtils.java | 2 +-
.../org/apache/hadoop/hive/ql/metadata/Hive.java | 4 +-
.../ql/metadata/SessionHiveMetaStoreClient.java | 6 +--
.../hive/ql/parse/MetaDataExportListener.java | 2 +-
.../hadoop/hive/ql/txn/compactor/FSRemover.java | 2 +-
...MetastoreClientExchangePartitionsTempTable.java | 2 +-
.../apache/hadoop/hive/metastore/MetaStoreFS.java | 43 ---------------
.../apache/hadoop/hive/metastore/Warehouse.java | 37 +++++--------
.../hadoop/hive/metastore/conf/MetastoreConf.java | 2 -
.../hadoop/hive/metastore/utils/FileUtils.java | 62 +++++++++++-----------
.../apache/hadoop/hive/metastore/HMSHandler.java | 34 ++++++------
.../hadoop/hive/metastore/HiveAlterHandler.java | 2 +-
.../hadoop/hive/metastore/HiveMetaStoreFsImpl.java | 47 ----------------
.../hive/metastore/leader/AuditLeaderListener.java | 2 +-
.../hive/metastore/leader/TestLeaderListener.java | 2 +-
.../minihms/AbstractMetaStoreService.java | 6 +--
18 files changed, 75 insertions(+), 221 deletions(-)
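In short: the `recursive` flag is dropped from Warehouse.deleteDir, and the
static helper FileUtils.moveToTrash is folded into a new FileUtils.deleteDir
in metastore-common. A minimal before/after sketch of the call-site change
(the `wh`, `fs`, and `path` names are placeholders, not code from this patch):

    // Warehouse API: the recursive flag is removed.
    // before: wh.deleteDir(path, true, ifPurge, needCmRecycle);
    wh.deleteDir(path, ifPurge, needCmRecycle);

    // Static helper: renamed, with a new parameter order.
    // before: FileUtils.moveToTrash(fs, path, conf, purge);
    org.apache.hadoop.hive.metastore.utils.FileUtils.deleteDir(fs, path, purge, conf);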
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 375a94c6b13..183f0e3f921 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -57,7 +57,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.PathExistsException;
@@ -963,41 +962,6 @@ public static boolean distCpWithSnapshot(String oldSnapshot, String newSnapshot,
return copied;
}
- /**
- * Move a particular file or directory to the trash.
- * @param fs FileSystem to use
- * @param f path of file or directory to move to trash.
- * @param conf
- * @return true if move successful
- * @throws IOException
- */
- public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge)
- throws IOException {
- LOG.debug("deleting " + f);
- boolean result = false;
- try {
- if(purge) {
- LOG.debug("purge is set to true. Not moving to Trash " + f);
- } else {
- result = Trash.moveToAppropriateTrash(fs, f, conf);
- if (result) {
- LOG.trace("Moved to trash: " + f);
- return true;
- }
- }
- } catch (IOException ioe) {
- // for whatever failure reason including that trash has lower encryption zone
- // retry with force delete
- LOG.warn(ioe.getMessage() + "; Force to delete it.");
- }
-
- result = fs.delete(f, true);
- if (!result) {
- LOG.error("Failed to delete " + f);
- }
- return result;
- }
-
public static boolean rename(FileSystem fs, Path sourcePath,
Path destPath, Configuration conf) throws IOException {
LOG.info("Renaming " + sourcePath + " to " + destPath);
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fa89ee7be8e..094c6cfd2fb 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1736,11 +1736,6 @@ public static enum ConfVars {
CLI_PROMPT("hive.cli.prompt", "hive",
"Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
"Variable substitution will only be invoked at the Hive CLI startup."),
- /**
- * @deprecated Use MetastoreConf.FS_HANDLER_CLS
- */
- @Deprecated
- HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
// Things we log in the jobconf
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveUtils.java
index 84743a7bf17..538c1bd9fc4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveUtils.java
@@ -97,7 +97,7 @@ static Path getInterMediateDir(Path dir, Configuration conf, ConfVars suffixConf
static void deleteDir(Path dir, boolean shouldEnableCm, Configuration conf) throws HiveException {
try {
Warehouse wh = new Warehouse(conf);
- wh.deleteDir(dir, true, false, shouldEnableCm);
+ wh.deleteDir(dir, false, shouldEnableCm);
} catch (MetaException e) {
throw new HiveException(e);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 6a332a10f19..07b98266d85 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -5948,13 +5948,13 @@ public static boolean trashFiles(final FileSystem fs, final FileStatus[] statuse
final SessionState parentSession = SessionState.get();
for (final FileStatus status : statuses) {
if (null == pool) {
- result &= FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
+ result &= org.apache.hadoop.hive.metastore.utils.FileUtils.deleteDir(fs, status.getPath(), purge, conf);
} else {
futures.add(pool.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
SessionState.setCurrentSessionState(parentSession);
- return FileUtils.moveToTrash(fs, status.getPath(), conf, purge);
+ return org.apache.hadoop.hive.metastore.utils.FileUtils.deleteDir(fs, status.getPath(), purge, conf);
}
}));
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 5a897cfd24b..8af51d95200 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -1062,7 +1062,7 @@ private void truncateTempTable(Table table) throws TException {
HdfsUtils.HadoopFileStatus status = HdfsUtils.HadoopFileStatus.createInstance(conf, fs, location);
FileStatus targetStatus = fs.getFileStatus(location);
String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
- FileUtils.moveToTrash(fs, location, conf, isSkipTrash);
+ org.apache.hadoop.hive.metastore.utils.FileUtils.deleteDir(fs, location, isSkipTrash, conf);
fs.mkdirs(location);
HdfsUtils.setFullFileStatus(conf, status, targetGroup, fs, location, false);
} else {
@@ -1124,7 +1124,7 @@ private void dropTempTable(Table table, boolean deleteData,
// Delete table data
if (deleteData && !isExternalTable(table)) {
try {
- getWh().deleteDir(tablePath, true, ifPurge, false);
+ getWh().deleteDir(tablePath, ifPurge, false);
} catch (Exception err) {
LOG.error("Failed to delete temp table directory: " + tablePath, err);
// Forgive error
@@ -2215,7 +2215,7 @@ private boolean deletePartitionLocation(Partition partition, boolean purgeData)
Path path = getWh().getDnsPath(new Path(location));
try {
do {
- if (!getWh().deleteDir(path, true, purgeData, false)) {
+ if (!getWh().deleteDir(path, purgeData, false)) {
throw new MetaException("Unable to delete partition at " + location);
}
path = path.getParent();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java
index b6d8a284952..741a74f17e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java
@@ -90,7 +90,7 @@ private void export_meta_data(PreDropTableEvent tableEvent) throws MetaException
EximUtil.createExportDump(fs, outFile, mTbl, null, null,
new HiveConf(conf, MetaDataExportListener.class));
if (moveMetadataToTrash == true) {
- wh.deleteDir(metaPath, true, false, false);
+ wh.deleteDir(metaPath, false, false);
}
} catch (IOException | SemanticException e) {
throw new MetaException(e.getMessage());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/FSRemover.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/FSRemover.java
index cffef09f127..82231d30d4e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/FSRemover.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/FSRemover.java
@@ -122,7 +122,7 @@ private List<Path> removeFiles(CleanupRequest cr)
if (needCmRecycle) {
replChangeManager.recycle(dead, ReplChangeManager.RecycleType.MOVE,
cr.isPurge());
}
- if (FileUtils.moveToTrash(fs, dead, conf, cr.isPurge())) {
+ if (FileUtils.deleteDir(fs, dead, cr.isPurge(), conf)) {
deleted.add(dead);
}
}
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientExchangePartitionsTempTable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientExchangePartitionsTempTable.java
index 836483e2a93..fe5432e01e8 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientExchangePartitionsTempTable.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientExchangePartitionsTempTable.java
@@ -356,7 +356,7 @@ private Table createNonTempTable(String dbName, String tableName, List<FieldSche
}
private void cleanTempTableDir(Table table) throws MetaException {
- wh.deleteDir(new Path(table.getSd().getLocation()), true, false, false);
+ wh.deleteDir(new Path(table.getSd().getLocation()), false, false);
}
}
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
deleted file mode 100644
index ddcda4c41f6..00000000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-
-/**
- * Define a set of APIs that may vary in different environments
- */
-public interface MetaStoreFS {
-
- /**
- * delete a directory
- *
- * @param f
- * @param ifPurge
- * @param recursive
- * @return true on success
- * @throws MetaException
- */
- public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
- boolean ifPurge, Configuration conf) throws MetaException;
-
-}
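With the MetaStoreFS interface gone (its only implementation,
HiveMetaStoreFsImpl, is deleted further below), nothing is loaded via
reflection from metastore.fs.handler.class anymore. A sketch of the
substitution inside Warehouse.deleteDir, per the Warehouse.java hunk that
follows:

    // before, through the configured handler:
    //   return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
    // after, a direct static call (IOException mapped to MetaException):
    boolean deleted = FileUtils.deleteDir(fs, f, ifPurge, conf);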
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 5e639cd1067..5345c45fb64 100755
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -40,7 +40,6 @@
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
-import org.apache.hadoop.hive.metastore.utils.JavaUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -57,7 +56,6 @@
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.util.ReflectionUtils;
import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE_PATTERN;
import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_PATH_SUFFIX;
@@ -83,7 +81,6 @@ public class Warehouse {
public static final Logger LOG = LoggerFactory.getLogger("hive.metastore.warehouse");
- private MetaStoreFS fsHandler = null;
private boolean storageAuthCheck = false;
private ReplChangeManager cm = null;
@@ -95,27 +92,11 @@ public Warehouse(Configuration conf) throws MetaException {
+ " is not set in the config or blank");
}
whRootExternalString = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE_EXTERNAL);
- fsHandler = getMetaStoreFsHandler(conf);
cm = ReplChangeManager.getInstance(conf);
storageAuthCheck = MetastoreConf.getBoolVar(conf, ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS);
isTenantBasedStorage = MetastoreConf.getBoolVar(conf, ConfVars.ALLOW_TENANT_BASED_STORAGE);
}
- private MetaStoreFS getMetaStoreFsHandler(Configuration conf)
- throws MetaException {
- String handlerClassStr = MetastoreConf.getVar(conf, ConfVars.FS_HANDLER_CLS);
- try {
- Class<? extends MetaStoreFS> handlerClass = (Class<? extends MetaStoreFS>) Class
- .forName(handlerClassStr, true, JavaUtils.getClassLoader());
- MetaStoreFS handler = ReflectionUtils.newInstance(handlerClass, conf);
- return handler;
- } catch (ClassNotFoundException e) {
- throw new MetaException("Error in loading MetaStoreFS handler."
- + e.getMessage());
- }
- }
-
-
/**
* Helper functions to convert IOException to MetaException
*/
@@ -451,15 +432,15 @@ void addToChangeManagement(Path file) throws MetaException {
}
}
- public boolean deleteDir(Path f, boolean recursive, Database db) throws MetaException {
- return deleteDir(f, recursive, false, db);
+ public boolean deleteDir(Path f, Database db) throws MetaException {
+ return deleteDir(f, false, db);
}
- public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, Database db) throws MetaException {
- return deleteDir(f, recursive, ifPurge, ReplChangeManager.isSourceOfReplication(db));
+ public boolean deleteDir(Path f, boolean ifPurge, Database db) throws MetaException {
+ return deleteDir(f, ifPurge, ReplChangeManager.isSourceOfReplication(db));
}
- public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean needCmRecycle) throws MetaException {
+ public boolean deleteDir(Path f, boolean ifPurge, boolean needCmRecycle) throws MetaException {
if (needCmRecycle) {
try {
cm.recycle(f, RecycleType.MOVE, ifPurge);
@@ -472,7 +453,13 @@ public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean nee
LOG.warn("Har path {} is not supported to delete, skipping it.", f);
return true;
}
- return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
+ boolean delete = false;
+ try {
+ delete = FileUtils.deleteDir(fs, f, ifPurge, conf);
+ } catch (IOException e) {
+ MetaStoreUtils.throwMetaException(e);
+ }
+ return delete;
}
public void recycleDirToCmPath(Path f, boolean ifPurge) throws MetaException {
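For reference, the three Warehouse.deleteDir overloads that remain after this
hunk (usage sketch; `wh`, `path`, `ifPurge`, and `db` are placeholders):

    wh.deleteDir(path, db);                     // no purge; recycles to CM if db is a replication source
    wh.deleteDir(path, ifPurge, db);            // purge behavior chosen by the caller
    wh.deleteDir(path, ifPurge, needCmRecycle); // CM recycling made explicit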
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 714a4eb612d..e2c313ee7a0 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -906,8 +906,6 @@ public enum ConfVars {
org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl.class.getName(),
"Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
+ "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
- FS_HANDLER_CLS("metastore.fs.handler.class", "hive.metastore.fs.handler.class",
- "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
FS_HANDLER_THREADS_COUNT("metastore.fshandler.threads", "hive.metastore.fshandler.threads", 15,
"Number of threads to be allocated for metastore handler for fs operations."),
HMS_HANDLER_ATTEMPTS("metastore.hmshandler.retry.attempts", "hive.hmshandler.retry.attempts", 10,
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
index 795528f2ec4..7f9dce2b59c 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
@@ -71,41 +71,20 @@ public boolean accept(Path p) {
}
};
- /**
- * Move a particular file or directory to the trash.
- * @param fs FileSystem to use
- * @param f path of file or directory to move to trash.
- * @param conf configuration object
- * @return true if move successful
- * @throws IOException
- */
- public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge)
+ public static boolean deleteDir(FileSystem fs, Path f, boolean ifPurge, Configuration conf)
throws IOException {
- LOG.debug("deleting " + f);
- boolean result;
+ if (!fs.exists(f)) {
+ LOG.warn("The path to delete does not exist: {}", f);
+ return true;
+ }
+ if (!ifPurge && moveToTrash(fs, f, conf)) {
+ return true;
+ }
+ boolean result = false;
try {
- if (!fs.exists(f)) {
- LOG.warn("The path to moveToTrash does not exist: " + f);
- return true;
- }
- if (purge) {
- LOG.debug("purge is set to true. Not moving to Trash " + f);
- } else {
- result = Trash.moveToAppropriateTrash(fs, f, conf);
- if (result) {
- LOG.trace("Moved to trash: " + f);
- return true;
- }
- }
- } catch (IOException ioe) {
// for whatever failure reason including that trash has lower encryption zone
// retry with force delete
- LOG.warn(ioe.getMessage() + "; Force to delete it.");
- }
-
- try {
result = fs.delete(f, true);
-
} catch (RemoteException | SnapshotException se) {
// If this is snapshot exception or the cause is snapshot replication from HDFS, could be the case where the
// snapshots were created by replication, so in that case attempt to delete the replication related snapshots,
@@ -117,11 +96,32 @@ public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boo
result = fs.delete(f, true);
}
if (!result) {
- LOG.error("Failed to delete " + f);
+ LOG.error("Failed to delete {}", f);
}
return result;
}
+ /**
+ * Move a particular file or directory to the trash.
+ * @param fs FileSystem to use
+ * @param f path of file or directory to move to trash.
+ * @param conf configuration object
+ * @return true if move successful
+ */
+ private static boolean moveToTrash(FileSystem fs, Path f, Configuration conf) {
+ LOG.debug("moving {} to trash", f);
+ try {
+ boolean result = Trash.moveToAppropriateTrash(fs, f, conf);
+ if (result) {
+ LOG.trace("Moved to trash: {}", f);
+ return true;
+ }
+ } catch (IOException ioe) {
+ LOG.warn("Failed to move path to trash: {}", f, ioe);
+ }
+ return false;
+ }
+
/**
* Attempts to delete the replication related snapshots
* @param fs the filesystem
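Read together, the hunks above leave the utils FileUtils.deleteDir behaving
roughly as follows (a paraphrased sketch; logging and the
RemoteException/SnapshotException retry path that cleans up replication
snapshots are elided):

    public static boolean deleteDir(FileSystem fs, Path f, boolean ifPurge, Configuration conf)
        throws IOException {
      if (!fs.exists(f)) {
        return true;                // a missing path counts as already deleted
      }
      if (!ifPurge && moveToTrash(fs, f, conf)) {
        return true;                // trash first, unless the caller asked to purge
      }
      return fs.delete(f, true);    // otherwise force-delete recursively
    }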
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index 086766de41b..423dffb455f 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -952,7 +952,7 @@ public void create_catalog(CreateCatalogRequest rqst)
if (!success) {
ms.rollbackTransaction();
if (madeDir) {
- wh.deleteDir(catPath, true, false, false);
+ wh.deleteDir(catPath, false, false);
}
}
@@ -1139,7 +1139,7 @@ private void dropCatalogCore(String catName, boolean ifExists)
}
} finally {
if (success) {
- wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false, false, false);
+ wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false, false);
} else {
ms.rollbackTransaction();
}
@@ -2545,7 +2545,7 @@ private void create_table_core(final RawStore ms, final CreateTableRequest req)
if (!success) {
ms.rollbackTransaction();
if (madeDir) {
- wh.deleteDir(tblPath, true, false, ReplChangeManager.shouldEnableCm(db, tbl));
+ wh.deleteDir(tblPath, false, ReplChangeManager.shouldEnableCm(db, tbl));
}
}
@@ -3136,7 +3136,7 @@ private void deleteTableData(Path tablePath, boolean ifPurge, boolean shouldEnab
private void deleteTableData(Path tablePath, boolean ifPurge, Database db) {
if (tablePath != null) {
try {
- wh.deleteDir(tablePath, true, ifPurge, db);
+ wh.deleteDir(tablePath, ifPurge, db);
} catch (Exception e) {
LOG.error("Failed to delete table directory: " + tablePath +
" " + e.getMessage());
@@ -3174,7 +3174,7 @@ private void deletePartitionData(List<Path> partPaths, boolean ifPurge, Database
if (partPaths != null && !partPaths.isEmpty()) {
for (Path partPath : partPaths) {
try {
- wh.deleteDir(partPath, true, ifPurge, db);
+ wh.deleteDir(partPath, ifPurge, db);
} catch (Exception e) {
LOG.error("Failed to delete partition directory: " + partPath +
" " + e.getMessage());
@@ -3199,16 +3199,16 @@ private void deleteDataExcludeCmroot(Path path, boolean ifPurge, boolean shouldE
FileStatus[] statuses = path.getFileSystem(conf).listStatus(path,
ReplChangeManager.CMROOT_PATH_FILTER);
for (final FileStatus status : statuses) {
- wh.deleteDir(status.getPath(), true, ifPurge, shouldEnableCm);
+ wh.deleteDir(status.getPath(), ifPurge, shouldEnableCm);
}
//Check if table directory is empty, delete it
FileStatus[] statusWithoutFilter = path.getFileSystem(conf).listStatus(path);
if (statusWithoutFilter.length == 0) {
- wh.deleteDir(path, true, ifPurge, shouldEnableCm);
+ wh.deleteDir(path, ifPurge, shouldEnableCm);
}
} else {
//If no cm delete the complete table directory
- wh.deleteDir(path, true, ifPurge, shouldEnableCm);
+ wh.deleteDir(path, ifPurge, shouldEnableCm);
}
} catch (Exception e) {
LOG.error("Failed to delete directory: {}", path, e);
@@ -3596,7 +3596,7 @@ private void truncateDataFiles(Path location, boolean isSkipTrash, boolean needC
FileStatus targetStatus = fs.getFileStatus(location);
String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
- wh.deleteDir(location, true, isSkipTrash, needCmRecycle);
+ wh.deleteDir(location, isSkipTrash, needCmRecycle);
fs.mkdirs(location);
HdfsUtils.setFullFileStatus(getConf(), status, targetGroup, fs, location, false);
} else {
@@ -3605,7 +3605,7 @@ private void truncateDataFiles(Path location, boolean isSkipTrash, boolean needC
return;
}
for (final FileStatus status : statuses) {
- wh.deleteDir(status.getPath(), true, isSkipTrash, needCmRecycle);
+ wh.deleteDir(status.getPath(), isSkipTrash, needCmRecycle);
}
}
}
@@ -4112,7 +4112,7 @@ private Partition append_partition_common(RawStore ms, String catName, String db
if (!success) {
ms.rollbackTransaction();
if (madeDir) {
- wh.deleteDir(partLocation, true, false, ReplChangeManager.shouldEnableCm(db, tbl));
+ wh.deleteDir(partLocation, false, ReplChangeManager.shouldEnableCm(db, tbl));
}
}
@@ -5120,9 +5120,9 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam
// Archived partitions have har:/to_har_file as their location.
// The original directory was saved in params
if (isArchived) {
- wh.deleteDir(archiveParentDir, true, mustPurge, needsCm);
+ wh.deleteDir(archiveParentDir, mustPurge, needsCm);
} else {
- wh.deleteDir(partPath, true, mustPurge, needsCm);
+ wh.deleteDir(partPath, mustPurge, needsCm);
deleteParentRecursive(partPath.getParent(), part_vals.size() - 1, mustPurge, needsCm);
}
// ok even if the data is not deleted
@@ -5181,7 +5181,7 @@ private boolean isDatabaseRemote(String name) {
private void deleteParentRecursive(Path parent, int depth, boolean mustPurge, boolean needRecycle)
throws IOException, MetaException {
if (depth > 0 && parent != null && wh.isWritable(parent) && wh.isEmptyDir(parent)) {
- wh.deleteDir(parent, true, mustPurge, needRecycle);
+ wh.deleteDir(parent, mustPurge, needRecycle);
deleteParentRecursive(parent.getParent(), depth - 1, mustPurge, needRecycle);
}
}
@@ -5372,7 +5372,7 @@ public DropPartitionsResult drop_partitions_req(
// Archived partitions have har:/to_har_file as their location.
// The original directory was saved in params
for (Path path : archToDelete) {
- wh.deleteDir(path, true, mustPurge, needsCm);
+ wh.deleteDir(path, mustPurge, needsCm);
}
// Uses a priority queue to delete the parents of deleted directories if empty.
@@ -5381,7 +5381,7 @@ public DropPartitionsResult drop_partitions_req(
// avoided.
PriorityQueue<PathAndDepth> parentsToDelete = new PriorityQueue<>();
for (PathAndDepth p : dirsToDelete) {
- wh.deleteDir(p.path, true, mustPurge, needsCm);
+ wh.deleteDir(p.path, mustPurge, needsCm);
addParentForDel(parentsToDelete, p);
}
@@ -5396,7 +5396,7 @@ public DropPartitionsResult drop_partitions_req(
Path path = p.path;
if (wh.isWritable(path) && wh.isEmptyDir(path)) {
- wh.deleteDir(path, true, mustPurge, needsCm);
+ wh.deleteDir(path, mustPurge, needsCm);
addParentForDel(parentsToDelete, p);
}
} catch (IOException ex) {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 48db8dae825..d78dd9ab8ce 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -488,7 +488,7 @@ public List<Void> run(List<Partition> input) throws Exception {
Path deleteOldDataLoc = new Path(oldt.getSd().getLocation());
boolean isSkipTrash = MetaStoreUtils.isSkipTrash(oldt.getParameters());
try {
- wh.deleteDir(deleteOldDataLoc, true, isSkipTrash,
+ wh.deleteDir(deleteOldDataLoc, isSkipTrash,
ReplChangeManager.shouldEnableCm(olddb, oldt));
LOG.info("Deleted the old data location: {} for the table: {}",
deleteOldDataLoc, databaseName + "." + tableName);
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
deleted file mode 100644
index 9721668be18..00000000000
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.metastore.utils.FileUtils;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-
-public class HiveMetaStoreFsImpl implements MetaStoreFS {
-
- public static final Logger LOG = LoggerFactory
- .getLogger("hive.metastore.hivemetastoreFsimpl");
-
- @Override
- public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
- boolean ifPurge, Configuration conf) throws MetaException {
- try {
- if (FileUtils.moveToTrash(fs, f, conf, ifPurge)) {
- return true;
- }
- } catch (Exception e) {
- MetaStoreUtils.throwMetaException(e);
- }
- throw new MetaException("Unable to delete directory: " + f);
- }
-}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/AuditLeaderListener.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/AuditLeaderListener.java
index 0270da27f60..f060f755fcf 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/AuditLeaderListener.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/AuditLeaderListener.java
@@ -146,7 +146,7 @@ public void takeLeadership(LeaderElection election) throws Exception {
thisLeaderFiles.sort((Comparator.comparing(o -> o.getPath().getName())));
// delete the files that beyond the limit
for (int i = 0; i < thisLeaderFiles.size() - limit; i++) {
- FileUtils.moveToTrash(fs, thisLeaderFiles.get(i).getPath(), configuration, true);
+ FileUtils.deleteDir(fs, thisLeaderFiles.get(i).getPath(), false, configuration);
}
}
}
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderListener.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderListener.java
index 768dd03810d..374d86a0618 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderListener.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderListener.java
@@ -91,7 +91,7 @@ public void testAuditLeaderListener() throws Exception {
Assert.assertTrue(fileStatuses.size() == 1);
Assert.assertTrue(fileStatuses.get(0).getPath().getName().equals("leader_testAuditLeaderListener.json"));
} finally {
- FileUtils.moveToTrash(fileSystem, location, conf, true);
+ FileUtils.deleteDir(fileSystem, location, false, conf);
}
}
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
index 9a66c493029..0851859f791 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
@@ -166,9 +166,9 @@ public void createFile(Path path, String content) throws IOException {
* @throws MetaException IO failure
*/
public void cleanWarehouseDirs() throws MetaException {
- warehouse.deleteDir(getWarehouseRoot(), true, true, false);
- warehouse.deleteDir(getExternalWarehouseRoot(), true, true, false);
- warehouse.deleteDir(trashDir, true, true, false);
+ warehouse.deleteDir(getWarehouseRoot(), true, false);
+ warehouse.deleteDir(getExternalWarehouseRoot(), true, false);
+ warehouse.deleteDir(trashDir, true, false);
}
/**