[ https://issues.apache.org/jira/browse/SPARK-14651?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17136169#comment-17136169 ]
Jeffrey E Rodriguez edited comment on SPARK-14651 at 6/15/20, 9:58 PM:
------------------------------------------------------------------------

This is showing as resolved/incomplete. Trying to work with Spark 2.4 on Cloudera and it is still an issue. I get the message "CREATE TEMPORARY TABLE is not supported yet. Please use CREATE TEMPORARY VIEW as an alternative." Is that the fix? Use TEMPORARY VIEW? (A short sketch of that alternative follows the quoted issue below.)

was (Author: jeffreyr97):
This is showing as solve/incomplete. Trying to work with Spark 2.4 Cloudera and it is still an issue. I get message " CREATE TEMPORARY TABLE is not supported yet. Please use CREATE TEMPORARY VIEW as an alternative." Is that the fix?? Use TEMPARY VIEW??

> CREATE TEMPORARY TABLE is not supported yet
> -------------------------------------------
>
>              Key: SPARK-14651
>              URL: https://issues.apache.org/jira/browse/SPARK-14651
>          Project: Spark
>       Issue Type: Bug
>       Components: SQL
> Affects Versions: 2.0.0, 2.2.0
>         Reporter: Jacek Laskowski
>         Priority: Minor
>           Labels: bulk-closed
>
> With today's master it seems that {{CREATE TEMPORARY TABLE}} may or may not work depending on how complete the DDL is (?)
> {code}
> scala> sql("CREATE temporary table t2")
> 16/04/14 23:29:26 INFO HiveSqlParser: Parsing command: CREATE temporary table t2
> org.apache.spark.sql.catalyst.parser.ParseException:
> CREATE TEMPORARY TABLE is not supported yet. Please use registerTempTable as an alternative.(line 1, pos 0)
>
> == SQL ==
> CREATE temporary table t2
> ^^^
>
>   at org.apache.spark.sql.hive.execution.HiveSqlAstBuilder$$anonfun$visitCreateTable$1.apply(HiveSqlParser.scala:169)
>   at org.apache.spark.sql.hive.execution.HiveSqlAstBuilder$$anonfun$visitCreateTable$1.apply(HiveSqlParser.scala:165)
>   at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:85)
>   at org.apache.spark.sql.hive.execution.HiveSqlAstBuilder.visitCreateTable(HiveSqlParser.scala:165)
>   at org.apache.spark.sql.hive.execution.HiveSqlAstBuilder.visitCreateTable(HiveSqlParser.scala:53)
>   at org.apache.spark.sql.catalyst.parser.SqlBaseParser$CreateTableContext.accept(SqlBaseParser.java:1049)
>   at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:42)
>   at org.apache.spark.sql.catalyst.parser.AstBuilder$$anonfun$visitSingleStatement$1.apply(AstBuilder.scala:63)
>   at org.apache.spark.sql.catalyst.parser.AstBuilder$$anonfun$visitSingleStatement$1.apply(AstBuilder.scala:63)
>   at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:85)
>   at org.apache.spark.sql.catalyst.parser.AstBuilder.visitSingleStatement(AstBuilder.scala:62)
>   at org.apache.spark.sql.catalyst.parser.AbstractSqlParser$$anonfun$parsePlan$1.apply(ParseDriver.scala:54)
>   at org.apache.spark.sql.catalyst.parser.AbstractSqlParser$$anonfun$parsePlan$1.apply(ParseDriver.scala:53)
>   at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:86)
>   at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:53)
>   at org.apache.spark.sql.SQLContext.parseSql(SQLContext.scala:198)
>   at org.apache.spark.sql.hive.HiveContext.org$apache$spark$sql$hive$HiveContext$$super$parseSql(HiveContext.scala:201)
>   at org.apache.spark.sql.hive.HiveContext$$anonfun$parseSql$1.apply(HiveContext.scala:201)
>   at org.apache.spark.sql.hive.HiveContext$$anonfun$parseSql$1.apply(HiveContext.scala:201)
>   at org.apache.spark.sql.hive.client.HiveClientImpl$$anonfun$withHiveState$1.apply(HiveClientImpl.scala:228)
>   at org.apache.spark.sql.hive.client.HiveClientImpl.liftedTree1$1(HiveClientImpl.scala:175)
>   at org.apache.spark.sql.hive.client.HiveClientImpl.retryLocked(HiveClientImpl.scala:174)
>   at org.apache.spark.sql.hive.client.HiveClientImpl.withHiveState(HiveClientImpl.scala:217)
>   at org.apache.spark.sql.hive.HiveContext.parseSql(HiveContext.scala:200)
>   at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:765)
>   ... 48 elided
>
> scala> sql("CREATE temporary table t2 USING PARQUET OPTIONS (PATH 'hello') AS SELECT * FROM t1")
> 16/04/14 23:30:21 INFO HiveSqlParser: Parsing command: CREATE temporary table t2 USING PARQUET OPTIONS (PATH 'hello') AS SELECT * FROM t1
> org.apache.spark.sql.AnalysisException: Table or View not found: t1; line 1 pos 80
>   at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.org$apache$spark$sql$catalyst$analysis$Analyzer$ResolveRelations$$getTable(Analyzer.scala:412)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:421)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:416)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:58)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:58)
>   at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:68)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:57)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:55)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:55)
>   at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$5.apply(TreeNode.scala:302)
>   at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
>   at scala.collection.Iterator$class.foreach(Iterator.scala:893)
>   at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
>   at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
>   at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104)
>   at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48)
>   at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:310)
>   at scala.collection.AbstractIterator.to(Iterator.scala:1336)
>   at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:302)
>   at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1336)
>   at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:289)
>   at scala.collection.AbstractIterator.toArray(Iterator.scala:1336)
>   at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:351)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:55)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:55)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:55)
>   at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$5.apply(TreeNode.scala:302)
>   at scala.collection.Iterator$$anon$11.next(Iterator.scala:409)
>   at scala.collection.Iterator$class.foreach(Iterator.scala:893)
>   at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
>   at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
>   at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104)
>   at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48)
>   at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:310)
>   at scala.collection.AbstractIterator.to(Iterator.scala:1336)
>   at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:302)
>   at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1336)
>   at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:289)
>   at scala.collection.AbstractIterator.toArray(Iterator.scala:1336)
>   at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:351)
>   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:55)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:416)
>   at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:406)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:85)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:82)
>   at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:124)
>   at scala.collection.immutable.List.foldLeft(List.scala:84)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:82)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:74)
>   at scala.collection.immutable.List.foreach(List.scala:381)
>   at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:74)
>   at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:46)
>   at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:46)
>   at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:41)
>   at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:61)
>   at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:765)
>   ... 48 elided
> {code}
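A minimal sketch of the {{CREATE TEMPORARY VIEW}} alternative asked about above, assuming Spark 2.x+ in spark-shell (where {{spark}}, {{sql}} and the implicits are predefined); the {{/tmp/spark14651_demo}} path and the {{t1}}/{{t2}} names are made up for illustration and are not taken from the issue:

{code}
// Hypothetical demo data; column names and the output path are illustrative only.
val df = Seq((1, "a"), (2, "b")).toDF("id", "name")
df.write.mode("overwrite").parquet("/tmp/spark14651_demo")

// DataFrame API: register a temporary view (the successor of registerTempTable).
df.createOrReplaceTempView("t1")

// SQL: a temporary view over a data source, in place of CREATE TEMPORARY TABLE ... USING ...
sql("CREATE TEMPORARY VIEW t2 USING parquet OPTIONS (path '/tmp/spark14651_demo')")

// Both views can now be queried like tables for the lifetime of the session.
sql("SELECT * FROM t2").show()
{code}

Both views are session-scoped and are not registered in the Hive metastore.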
--
This message was sent by Atlassian Jira
(v8.3.4#803005)

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@spark.apache.org
For additional commands, e-mail: issues-h...@spark.apache.org