dengzhhu653 commented on code in PR #5851:
URL: https://github.com/apache/hive/pull/5851#discussion_r2660182317


##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/handler/DropDatabaseHandler.java:
##########
@@ -0,0 +1,393 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.handler;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.Batchable;
+import org.apache.hadoop.hive.metastore.HMSHandler;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+import org.apache.hadoop.hive.metastore.RawStore;
+import org.apache.hadoop.hive.metastore.ReplChangeManager;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DropDatabaseRequest;
+import org.apache.hadoop.hive.metastore.api.DropPackageRequest;
+import org.apache.hadoop.hive.metastore.api.DropTableRequest;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.ListPackageRequest;
+import org.apache.hadoop.hive.metastore.api.ListStoredProcedureRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.ResourceUri;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.utils.FileUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.thrift.TException;
+
+import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_IN_TEST;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.checkTableDataShouldBeDeleted;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.isDbReplicationTarget;
+
+public class DropDatabaseHandler
+    extends AbstractOperationHandler<DropDatabaseRequest, DropDatabaseHandler.DropDatabaseResult> {
+  private String name;
+  private Database db;
+  private List<Table> tables;
+  private List<Function> functions;
+  private List<String> procedures;
+  private List<String> packages;
+  private AtomicReference<String> progress;
+  private DropDatabaseResult result;
+
+  DropDatabaseHandler(IHMSHandler handler, DropDatabaseRequest request)
+      throws TException, IOException {
+    super(handler, request.isAsyncDrop(), request);
+  }
+
+  public DropDatabaseResult execute() throws TException, IOException {
+    boolean success = false;
+    Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+    RawStore rs = handler.getMS();
+    rs.openTransaction();
+    try {
+      if (MetaStoreUtils.isDatabaseRemote(db)) {
+        if (rs.dropDatabase(db.getCatalogName(), db.getName())) {
+          success = rs.commitTransaction();
+        }
+        return result;
+      }
+      List<Path> partitionPaths = new ArrayList<>();
+      // drop any functions before dropping db
+      for (int i = 0, j = functions.size(); i < functions.size(); i++, j--) {
+        progress.set("Dropping functions from the database, " + j + " 
functions left");
+        Function func = functions.get(i);
+        rs.dropFunction(request.getCatalogName(), request.getName(), 
func.getFunctionName());
+      }
+
+      for (int i = 0, j = procedures.size(); i < procedures.size(); i++, j--) {
+        progress.set("Dropping procedures from the database, " + j + " 
procedures left");
+        String procName = procedures.get(i);
+        rs.dropStoredProcedure(request.getCatalogName(), request.getName(), 
procName);
+      }
+
+      for (int i = 0, j = packages.size(); i < packages.size(); i++, j--) {
+        progress.set("Dropping packages from the database, " + j + " packages 
left");
+        String pkgName = packages.get(i);
+        rs.dropPackage(new DropPackageRequest(request.getCatalogName(), 
request.getName(), pkgName));
+      }
+
+      List<Table> tablesToDrop = sortTablesToDrop();
+      for (int i = 0, j = tablesToDrop.size(); i < tablesToDrop.size(); i++, j--) {
+        progress.set("Dropping tables from the database, " + j + " tables left");
+        checkInterrupted();
+        Table table = tablesToDrop.get(i);
+        boolean isSoftDelete = TxnUtils.isTableSoftDeleteEnabled(table, request.isSoftDelete());
+        boolean tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(table, request.isDeleteData())
+            && !isSoftDelete;
+
+        EnvironmentContext context = null;
+        if (isSoftDelete) {
+          context = new EnvironmentContext();
+          context.putToProperties(hive_metastoreConstants.TXN_ID, String.valueOf(request.getTxnId()));
+          request.setDeleteManagedDir(false);
+        }
+        DropTableRequest dropRequest = new DropTableRequest(request.getName(), table.getTableName());
+        dropRequest.setCatalogName(request.getCatalogName());
+        dropRequest.setEnvContext(context);
+        // Drop the table but not its data
+        dropRequest.setDeleteData(false);
+        dropRequest.setDropPartitions(true);
+        AbstractOperationHandler<DropTableRequest, DropTableHandler.DropTableResult> dropTable =
+            AbstractOperationHandler.offer(handler, dropRequest);
+        DropTableHandler.DropTableResult dropTableResult = dropTable.getResult();
+        if (tableDataShouldBeDeleted
+            && dropTableResult.success()
+            && dropTableResult.partPaths() != null) {
+          partitionPaths.addAll(dropTableResult.partPaths());
+        }
+      }
+
+      if (rs.dropDatabase(request.getCatalogName(), request.getName())) {
+        if (!handler.getTransactionalListeners().isEmpty()) {
+          checkInterrupted();
+          DropDatabaseEvent dropEvent = new DropDatabaseEvent(db, true, handler, isDbReplicationTarget(db));
+          EnvironmentContext context = null;
+          if (!request.isDeleteManagedDir()) {
+            context = new EnvironmentContext();
+            context.putToProperties(hive_metastoreConstants.TXN_ID, String.valueOf(request.getTxnId()));
+          }
+          dropEvent.setEnvironmentContext(context);
+          transactionalListenerResponses =
+              MetaStoreListenerNotifier.notifyEvent(handler.getTransactionalListeners(),
+                  EventMessage.EventType.DROP_DATABASE, dropEvent);
+        }
+        success = rs.commitTransaction();
+      }
+      result.setSuccess(success);
+      result.setPartitionPaths(partitionPaths);
+    } finally {
+      if (!success) {
+        rs.rollbackTransaction();
+      }
+      if (!handler.getListeners().isEmpty()) {
+        MetaStoreListenerNotifier.notifyEvent(handler.getListeners(),
+            EventMessage.EventType.DROP_DATABASE,
+            new DropDatabaseEvent(db, success, handler, isDbReplicationTarget(db)),
+            null,
+            transactionalListenerResponses, rs);
+      }
+    }
+    return result;
+  }
+
+  @Override
+  protected void beforeExecute() throws TException, IOException {
+    if ((name = request.getName()) == null) {
+      throw new MetaException("Database name cannot be null.");
+    }
+    RawStore rs = handler.getMS();
+    String catalogName =
+        request.isSetCatalogName() ? request.getCatalogName() : MetaStoreUtils.getDefaultCatalog(handler.getConf());
+    request.setCatalogName(catalogName);
+    db = rs.getDatabase(request.getCatalogName(), request.getName());
+    if (!MetastoreConf.getBoolVar(handler.getConf(), HIVE_IN_TEST) && ReplChangeManager.isSourceOfReplication(db)) {
+      throw new InvalidOperationException("can not drop a database which is a source of replication");
+    }
+
+    List<String> tableNames = defaultEmptyList(rs.getAllTables(request.getCatalogName(), request.getName()));
+    functions = defaultEmptyList(rs.getFunctionsRequest(request.getCatalogName(), request.getName(), null, false));
+    ListStoredProcedureRequest procedureRequest = new ListStoredProcedureRequest(request.getCatalogName());
+    procedureRequest.setDbName(request.getName());
+    procedures = defaultEmptyList(rs.getAllStoredProcedures(procedureRequest));
+    ListPackageRequest pkgRequest = new ListPackageRequest(request.getCatalogName());
+    pkgRequest.setDbName(request.getName());
+    packages = defaultEmptyList(rs.listPackages(pkgRequest));
+
+    if (!request.isCascade()) {
+      if (!tableNames.isEmpty()) {
+        throw new InvalidOperationException(
+            "Database " + db.getName() + " is not empty. One or more tables exist.");
+      }
+      if (!functions.isEmpty()) {
+        throw new InvalidOperationException(
+            "Database " + db.getName() + " is not empty. One or more functions exist.");
+      }
+      if (!procedures.isEmpty()) {
+        throw new InvalidOperationException(
+            "Database " + db.getName() + " is not empty. One or more stored procedures exist.");
+      }
+      if (!packages.isEmpty()) {
+        throw new InvalidOperationException(
+            "Database " + db.getName() + " is not empty. One or more packages exist.");
+      }
+    }
+    Path path = new Path(db.getLocationUri()).getParent();
+    if (!handler.getWh().isWritable(path)) {
+      throw new MetaException("Database not dropped since its external 
warehouse location " + path +
+          " is not writable by " + SecurityUtils.getUser());
+    }
+    path = handler.getWh().getDatabaseManagedPath(db).getParent();
+    if (!handler.getWh().isWritable(path)) {
+      throw new MetaException("Database not dropped since its managed 
warehouse location " + path +
+          " is not writable by " + SecurityUtils.getUser());
+    }
+
+    result = new DropDatabaseResult(db);
+    checkFuncPathToCm();
+    // check the permission of table path to be deleted
+    checkTablePathPermission(rs, tableNames);
+    progress = new AtomicReference<>(
+        String.format("Starting to drop the database with %d tables, %d 
functions, %d procedures and %d packages.",
+            tables.size(), functions.size(), procedures.size(), 
packages.size()));
+
+    ((HMSHandler) handler).firePreEvent(new PreDropDatabaseEvent(db, handler));
+  }
+
+  private void checkFuncPathToCm() {
+    boolean needsCm = ReplChangeManager.isSourceOfReplication(db);
+    List<Path> funcNeedCmPaths = new ArrayList<>();
+    for (Function func : functions) {
+      // if copy of jar to change management fails we fail the metastore transaction, since the
+      // user might delete the jars on HDFS externally after dropping the function, hence having
+      // a copy is required to allow incremental replication to work correctly.
+      if (func.getResourceUris() != null && !func.getResourceUris().isEmpty()) {
+        for (ResourceUri uri : func.getResourceUris()) {
+          if (uri.getUri().toLowerCase().startsWith("hdfs:") && needsCm) {

Review Comment:
   nice catch, will do
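   
   For the record, a rough sketch of the restructuring I have in mind, assuming the point is that `needsCm` is loop-invariant and can short-circuit the whole URI scan (the body past the scheme check is truncated in this excerpt, so the collection step below is illustrative only):
   
       private void checkFuncPathToCm() {
         boolean needsCm = ReplChangeManager.isSourceOfReplication(db);
         if (!needsCm) {
           // not a replication source: no jars need to be copied to change management
           return;
         }
         List<Path> funcNeedCmPaths = new ArrayList<>();
         for (Function func : functions) {
           if (func.getResourceUris() == null || func.getResourceUris().isEmpty()) {
             continue;
           }
           for (ResourceUri uri : func.getResourceUris()) {
             // only hdfs-backed resources are tracked by the change manager
             if (uri.getUri().toLowerCase().startsWith("hdfs:")) {
               funcNeedCmPaths.add(new Path(uri.getUri()));
             }
           }
         }
         // ... copy the collected paths to CM as in the patch (elided above)
       }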


