Github user dilipbiswal commented on a diff in the pull request: https://github.com/apache/spark/pull/12222#discussion_r61027608 --- Diff: sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala --- @@ -122,4 +134,105 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto checkAnswer(sql("SHOW TBLPROPERTIES parquet_temp"), Nil) } } + + test("show columns") { + checkAnswer( + sql("SHOW COLUMNS IN parquet_tab3"), + Row("col1") :: Row("col 2") :: Nil) + + checkAnswer( + sql("SHOW COLUMNS IN default.parquet_tab3"), + Row("col1") :: Row("col 2") :: Nil) + + checkAnswer( + sql("SHOW COLUMNS IN parquet_tab3 FROM default"), + Row("col1") :: Row("col 2") :: Nil) + + checkAnswer( + sql("SHOW COLUMNS IN parquet_tab4 IN default"), + Row("price") :: Row("qty") :: Row("year") :: Row("month") :: Nil) + + val message = intercept[NoSuchTableException] { + sql("SHOW COLUMNS IN badtable FROM default") + }.getMessage + assert(message.contains("badtable not found in database")) + } + + test("show partitions - show everything") { + checkAnswer( + sql("show partitions parquet_tab4"), + Row("year=2015/month=1") :: + Row("year=2015/month=2") :: + Row("year=2016/month=2") :: + Row("year=2016/month=3") :: Nil) --- End diff -- @liancheng Thanks! You are right! Having >= 5 partition keys does expose the problem. Any advice on how to go about handling this? Can we change TablePartitionSpec to be a LinkedHashMap instead?
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA. --- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org