This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.6
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 2328707b4477a11b7713aee8f123b780ad48cc25
Author: kunal642 <kunalkapoor...@gmail.com>
AuthorDate: Tue Aug 27 14:49:58 2019 +0530

    [CARBONDATA-3505] Drop database cascade fix
    
    Problem: When 2 databases are created at the same location and one of them is 
dropped
    then the folder is also deleted from backend. If we try to drop the 2nd 
database
    then it would try to look up the other table, but the schema file would not 
exist
    in the backend and the drop will fail.
    
    Solution: Add a check to call CarbonDropDatabaseCommand only if the database
    location exists in the backend.
    
    This closes #3365
---
 .../main/scala/org/apache/spark/sql/CarbonEnv.scala   | 19 ++++++++++++++++++-
 .../command/cache/CarbonShowCacheCommand.scala        |  4 ++--
 .../spark/sql/execution/strategy/DDLStrategy.scala    |  4 +++-
 .../apache/spark/sql/hive/CarbonFileMetastore.scala   |  4 ++--
 4 files changed, 25 insertions(+), 6 deletions(-)

diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala 
b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
index 1cbd156..f2a52d2 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql
 import java.util.concurrent.ConcurrentHashMap
 
 import org.apache.spark.sql.catalyst.TableIdentifier
-import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, 
NoSuchTableException}
 import org.apache.spark.sql.catalyst.catalog.SessionCatalog
 import org.apache.spark.sql.events.{MergeBloomIndexEventListener, 
MergeIndexEventListener}
 import org.apache.spark.sql.execution.command.cache._
@@ -267,6 +267,23 @@ object CarbonEnv {
   }
 
   /**
+   * Returns true if the database folder exists in the file system. False in all 
other scenarios.
+   */
+  def databaseLocationExists(dbName: String,
+      sparkSession: SparkSession, ifExists: Boolean): Boolean = {
+    try {
+      FileFactory.getCarbonFile(getDatabaseLocation(dbName, 
sparkSession)).exists()
+    } catch {
+      case e: NoSuchDatabaseException =>
+        if (ifExists) {
+          false
+        } else {
+          throw e
+        }
+    }
+  }
+
+  /**
    * The method returns the database location
    * if carbon.storeLocation does  point to spark.sql.warehouse.dir then 
returns
    * the database locationUri as database location else follows the old 
behaviour
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
index 45e811a..4b7f680 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
@@ -443,9 +443,9 @@ case class CarbonShowCacheCommand(tableIdentifier: 
Option[TableIdentifier],
       case (_, _, sum, provider) =>
         provider.toLowerCase match {
           case `bloomFilterIdentifier` =>
-            allIndexSize += sum
-          case _ =>
             allDatamapSize += sum
+          case _ =>
+            allIndexSize += sum
         }
     }
     (allIndexSize, allDatamapSize)
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
index 4791687..3ef8cfa 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
@@ -37,6 +37,7 @@ import org.apache.spark.util.{CarbonReflectionUtils, 
DataMapUtil, FileUtils, Spa
 
 import 
org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
 import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.util.{CarbonProperties, DataTypeUtil, 
ThreadLocalSessionInfo}
 import org.apache.carbondata.spark.util.Util
@@ -115,7 +116,8 @@ class DDLStrategy(sparkSession: SparkSession) extends 
SparkStrategy {
           
.setConfigurationToCurrentThread(sparkSession.sessionState.newHadoopConf())
         FileUtils.createDatabaseDirectory(dbName, dbLocation, 
sparkSession.sparkContext)
         ExecutedCommandExec(createDb) :: Nil
-      case drop@DropDatabaseCommand(dbName, ifExists, isCascade) =>
+      case drop@DropDatabaseCommand(dbName,
+      ifExists, isCascade) if CarbonEnv.databaseLocationExists(dbName, 
sparkSession, ifExists) =>
         ExecutedCommandExec(CarbonDropDatabaseCommand(drop)) :: Nil
       case alterTable@CarbonAlterTableCompactionCommand(altertablemodel, _, _) 
=>
         val isCarbonTable = CarbonEnv.getInstance(sparkSession).carbonMetaStore
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
index 7ab2d47..b19b11c 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
@@ -288,13 +288,13 @@ class CarbonFileMetastore extends CarbonMetaStore {
         Some(wrapperTableInfo)
       } else {
         val tableMetadataFile = CarbonTablePath.getSchemaFilePath(tablePath)
-        schemaRefreshTime = FileFactory
-          
.getCarbonFile(CarbonTablePath.getSchemaFilePath(tablePath)).getLastModifiedTime
         val fileType = FileFactory.getFileType(tableMetadataFile)
         if (FileFactory.isFileExist(tableMetadataFile, fileType)) {
           val tableInfo: TableInfo = 
CarbonUtil.readSchemaFile(tableMetadataFile)
           val wrapperTableInfo =
             schemaConverter.fromExternalToWrapperTableInfo(tableInfo, dbName, 
tableName, tablePath)
+          schemaRefreshTime = FileFactory
+            .getCarbonFile(tableMetadataFile).getLastModifiedTime
           Some(wrapperTableInfo)
         } else {
           None

Reply via email to