[ https://issues.apache.org/jira/browse/CARBONDATA-1593?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

wyp updated CARBONDATA-1593:
----------------------------
    Description: 
When I run the following code snippet, I get a {{NoSuchTableException}}:
{code}
scala> import org.apache.spark.sql.SparkSession
scala> import org.apache.spark.sql.CarbonSession._
scala> val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://mycluster/user/wyp/carbon")
scala> carbon.sql("CREATE TABLE temp.order_common(id bigint, order_no string, create_time timestamp) partitioned by (dt string) STORED BY 'carbondata' tblproperties('partition_type'='RANGE','RANGE_INFO'='2010,2011')")
scala> carbon.sql("ALTER TABLE temp.order_common ADD PARTITION('2012')")
org.apache.spark.sql.catalyst.analysis.NoSuchTableException: Table or view 'order_common' not found in database 'default';
  at org.apache.spark.sql.hive.client.HiveClient$$anonfun$getTable$1.apply(HiveClient.scala:76)
  at org.apache.spark.sql.hive.client.HiveClient$$anonfun$getTable$1.apply(HiveClient.scala:76)
  at scala.Option.getOrElse(Option.scala:121)
  at org.apache.spark.sql.hive.client.HiveClient$class.getTable(HiveClient.scala:76)
  at org.apache.spark.sql.hive.client.HiveClientImpl.getTable(HiveClientImpl.scala:78)
  at org.apache.spark.sql.hive.HiveExternalCatalog$$anonfun$org$apache$spark$sql$hive$HiveExternalCatalog$$getRawTable$1.apply(HiveExternalCatalog.scala:110)
  at org.apache.spark.sql.hive.HiveExternalCatalog$$anonfun$org$apache$spark$sql$hive$HiveExternalCatalog$$getRawTable$1.apply(HiveExternalCatalog.scala:110)
  at org.apache.spark.sql.hive.HiveExternalCatalog.withClient(HiveExternalCatalog.scala:95)
  at org.apache.spark.sql.hive.HiveExternalCatalog.org$apache$spark$sql$hive$HiveExternalCatalog$$getRawTable(HiveExternalCatalog.scala:109)
  at org.apache.spark.sql.hive.HiveExternalCatalog$$anonfun$getTable$1.apply(HiveExternalCatalog.scala:601)
  at org.apache.spark.sql.hive.HiveExternalCatalog$$anonfun$getTable$1.apply(HiveExternalCatalog.scala:601)
  at org.apache.spark.sql.hive.HiveExternalCatalog.withClient(HiveExternalCatalog.scala:95)
  at org.apache.spark.sql.hive.HiveExternalCatalog.getTable(HiveExternalCatalog.scala:600)
  at org.apache.spark.sql.hive.HiveMetastoreCatalog.lookupRelation(HiveMetastoreCatalog.scala:106)
  at org.apache.spark.sql.hive.HiveSessionCatalog.lookupRelation(HiveSessionCatalog.scala:69)
  at org.apache.spark.sql.hive.CarbonSessionCatalog.lookupRelation(CarbonSessionState.scala:83)
  at org.apache.spark.sql.internal.CatalogImpl.refreshTable(CatalogImpl.scala:461)
  at org.apache.spark.sql.execution.command.AlterTableSplitPartitionCommand.processSchema(carbonTableSchema.scala:283)
  at org.apache.spark.sql.execution.command.AlterTableSplitPartitionCommand.run(carbonTableSchema.scala:229)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
  at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
  at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
  at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
  at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
  at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
  at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
  ... 50 elided
{code}

but the partition {{2012}} was in fact added to table {{temp.order_common}}:
{code}
scala> carbon.sql("show partitions temp.order_common").show(100, 100)
+----------------------+
|             partition|
+----------------------+
|       0, dt = DEFAULT|
|         1, dt < 2010 |
|2, 2010 <= dt <  2011 |
|3, 2011 <= dt <  2012 |
+----------------------+
{code}

My Spark version is 2.1.0 and my CarbonData version is 1.2.0.
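
Judging from the stack trace, the {{ALTER TABLE}} itself completes and the exception is thrown afterwards in {{CatalogImpl.refreshTable}}, which appears to look the table up by its unqualified name and therefore falls back to the {{default}} database. As a possible workaround (an untested sketch based on that reading of the trace, not a confirmed fix), switching the current database to {{temp}} before altering the table may let the post-alter refresh resolve it; the {{'2013'}} value below is just a hypothetical next range boundary:

{code}
scala> carbon.sql("USE temp")
scala> carbon.sql("ALTER TABLE order_common ADD PARTITION('2013')")
{code}

If this succeeds, it would support the theory that the bug lies only in how the post-alter refresh qualifies the table name.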

> Add partition to table causes NoSuchTableException
> --------------------------------------------------
>
>                 Key: CARBONDATA-1593
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-1593
>             Project: CarbonData
>          Issue Type: Bug
>          Components: sql
>    Affects Versions: 1.2.0
>            Reporter: wyp
>