Github user wzhfy commented on a diff in the pull request: https://github.com/apache/spark/pull/16422#discussion_r126275601 --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala --- @@ -626,6 +624,117 @@ case class DescribeTableCommand( } } +/** + * A command to list the info for a column, including name, data type, column stats and comment. + * This function creates a [[DescribeColumnCommand]] logical plan. + * + * The syntax of using this command in SQL is: + * {{{ + * DESCRIBE [EXTENDED|FORMATTED] table_name column_name; + * }}} + */ +case class DescribeColumnCommand( + table: TableIdentifier, + colNameParts: Seq[String], + isFormatted: Boolean) + extends RunnableCommand { + + override val output: Seq[Attribute] = { + // The displayed names are based on Hive. + // (Link for the corresponding Hive Jira: https://issues.apache.org/jira/browse/HIVE-7050) + if (isFormatted) { + Seq( + AttributeReference("col_name", StringType, nullable = false, + new MetadataBuilder().putString("comment", "name of the column").build())(), + AttributeReference("data_type", StringType, nullable = false, + new MetadataBuilder().putString("comment", "data type of the column").build())(), + AttributeReference("min", StringType, nullable = true, + new MetadataBuilder().putString("comment", "min value of the column").build())(), + AttributeReference("max", StringType, nullable = true, + new MetadataBuilder().putString("comment", "max value of the column").build())(), + AttributeReference("num_nulls", StringType, nullable = true, + new MetadataBuilder().putString("comment", "number of nulls of the column").build())(), + AttributeReference("distinct_count", StringType, nullable = true, + new MetadataBuilder().putString("comment", "distinct count of the column").build())(), + AttributeReference("avg_col_len", StringType, nullable = true, + new MetadataBuilder().putString("comment", + "average length of the values of the column").build())(), + 
AttributeReference("max_col_len", StringType, nullable = true, + new MetadataBuilder().putString("comment", + "maximum length of the values of the column").build())(), + AttributeReference("comment", StringType, nullable = true, + new MetadataBuilder().putString("comment", "comment of the column").build())()) + } else { + Seq( + AttributeReference("col_name", StringType, nullable = false, + new MetadataBuilder().putString("comment", "name of the column").build())(), + AttributeReference("data_type", StringType, nullable = false, + new MetadataBuilder().putString("comment", "data type of the column").build())(), + AttributeReference("comment", StringType, nullable = true, + new MetadataBuilder().putString("comment", "comment of the column").build())()) + } + } + + override def run(sparkSession: SparkSession): Seq[Row] = { + val catalog = sparkSession.sessionState.catalog + val resolver = sparkSession.sessionState.conf.resolver + val relation = sparkSession.table(table).queryExecution.analyzed + val attribute = { + val field = relation.resolve( --- End diff -- right, that's better, thanks!
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes to do so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA. --- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org