Github user juliuszsompolski commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20624#discussion_r170463362
  
    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala ---
    @@ -387,6 +390,101 @@ case class CatalogStatistics(
       }
     }
     
    +/**
    + * This class of statistics for a column is used in [[CatalogTable]] to interact with the metastore.
    + */
    +case class CatalogColumnStat(
    +  distinctCount: Option[BigInt] = None,
    +  min: Option[String] = None,
    +  max: Option[String] = None,
    +  nullCount: Option[BigInt] = None,
    +  avgLen: Option[Long] = None,
    +  maxLen: Option[Long] = None,
    +  histogram: Option[Histogram] = None) {
    +
    +  /**
    +   * Returns a map from string to string that can be used to serialize the column stats.
    +   * The key is the name of the column and the name of the field (e.g. "colName.distinctCount"),
    +   * and the value is the string representation of the value.
    +   * min/max values are stored as Strings. They can be deserialized using
    +   * [[ColumnStat.fromExternalString]].
    +   *
    +   * As part of the protocol, the returned map always contains a key called "version".
    +   * If the min/max values are null (None), they won't appear in the map.
    +   */
    +  def toMap(colName: String): Map[String, String] = {
    +    val map = new scala.collection.mutable.HashMap[String, String]
    +    map.put(s"${colName}.${CatalogColumnStat.KEY_VERSION}", "1")
    +    distinctCount.foreach { v =>
    +      map.put(s"${colName}.${CatalogColumnStat.KEY_DISTINCT_COUNT}", v.toString)
    +    }
    +    nullCount.foreach { v =>
    +      map.put(s"${colName}.${CatalogColumnStat.KEY_NULL_COUNT}", v.toString)
    +    }
    +    avgLen.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_AVG_LEN}", v.toString) }
    +    maxLen.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_MAX_LEN}", v.toString) }
    +    min.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_MIN_VALUE}", v) }
    +    max.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_MAX_VALUE}", v) }
    +    histogram.foreach { h =>
    +      map.put(s"${colName}.${CatalogColumnStat.KEY_HISTOGRAM}", HistogramSerializer.serialize(h))
    +    }
    +    map.toMap
    +  }
    +
    +  /** Convert [[CatalogColumnStat]] to [[ColumnStat]]. */
    +  def toPlanStat(
    +      colName: String,
    +      dataType: DataType): ColumnStat =
    +    ColumnStat(
    +      distinctCount = distinctCount,
    +      min = min.map(ColumnStat.fromExternalString(_, colName, dataType)),
    +      max = max.map(ColumnStat.fromExternalString(_, colName, dataType)),
    +      nullCount = nullCount,
    +      avgLen = avgLen,
    +      maxLen = maxLen,
    +      histogram = histogram)
    +}
    +
    +object CatalogColumnStat extends Logging {
    +
    +  // List of string keys used to serialize CatalogColumnStat
    +  val KEY_VERSION = "version"
    +  private val KEY_DISTINCT_COUNT = "distinctCount"
    +  private val KEY_MIN_VALUE = "min"
    +  private val KEY_MAX_VALUE = "max"
    +  private val KEY_NULL_COUNT = "nullCount"
    +  private val KEY_AVG_LEN = "avgLen"
    +  private val KEY_MAX_LEN = "maxLen"
    +  private val KEY_HISTOGRAM = "histogram"
    +
    +  /**
    +   * Creates a [[CatalogColumnStat]] object from the given map.
    +   * This is used to deserialize column stats from some external storage.
    +   * The serialization side is defined in [[CatalogColumnStat.toMap]].
    +   */
    +  def fromMap(
    +    table: String,
    +    colName: String,
    +    map: Map[String, String]): Option[CatalogColumnStat] = {
    +
    +    try {
    +      Some(CatalogColumnStat(
    +        distinctCount = map.get(s"${colName}.${KEY_DISTINCT_COUNT}").map(v => BigInt(v.toLong)),
    --- End diff ---
    
    The format doesn't change.
    There is an existing test, `StatisticsSuite."verify serialized column stats after analyzing columns"`, which verifies that the format of the serialized stats in the metastore doesn't change, by comparing them to a manually constructed map of properties.
    I will add a test that verifies the other direction: it adds the properties manually as TBLPROPERTIES, and verifies that they are successfully parsed.
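
    For reference (not part of the diff), here is a sketch of the key/value layout that `toMap` produces, for a hypothetical `INT` column `a1` with made-up statistics. Note that the `spark.sql.statistics.colStats.` prefix seen in the metastore is added later by the Hive external catalog, not by `toMap` itself:

    ```scala
    // Sketch only: the flat map emitted by CatalogColumnStat.toMap("a1") for a
    // column with distinctCount=2, min=1, max=2, nullCount=0, avgLen=4, maxLen=4.
    // Absent statistics (e.g. the histogram here) simply have no key in the map.
    val expected: Map[String, String] = Map(
      "a1.version" -> "1",
      "a1.distinctCount" -> "2",
      "a1.min" -> "1",
      "a1.max" -> "2",
      "a1.nullCount" -> "0",
      "a1.avgLen" -> "4",
      "a1.maxLen" -> "4")
    ```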


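    A sketch of what that reverse-direction test could look like (the test name, table name, and statistic values are hypothetical; it assumes the Hive-backed `StatisticsSuite`, where `hiveClient` can write table properties to the metastore directly, since Spark's own `ALTER TABLE` path may manage `spark.sql.statistics.*` properties itself):

    ```scala
    // Sketch, not the committed test: write serialized column stats directly as
    // table properties and check that Spark parses them back into column stats.
    import org.apache.spark.sql.catalyst.TableIdentifier

    test("verify column stats can be deserialized from tblproperties") {
      val table = "parse_col_stats_tbl"  // hypothetical table name
      withTable(table) {
        sql(s"CREATE TABLE $table (a1 INT)")
        // Same key format that CatalogColumnStat.toMap serializes, with the
        // colStats prefix the external catalog uses in the metastore.
        hiveClient.runSqlHive(
          s"""ALTER TABLE $table SET TBLPROPERTIES (
             |  'spark.sql.statistics.totalSize' = '1',
             |  'spark.sql.statistics.colStats.a1.version' = '1',
             |  'spark.sql.statistics.colStats.a1.distinctCount' = '2',
             |  'spark.sql.statistics.colStats.a1.min' = '1',
             |  'spark.sql.statistics.colStats.a1.max' = '2',
             |  'spark.sql.statistics.colStats.a1.nullCount' = '0',
             |  'spark.sql.statistics.colStats.a1.avgLen' = '4',
             |  'spark.sql.statistics.colStats.a1.maxLen' = '4')""".stripMargin)
        val stats = spark.sessionState.catalog.getTableMetadata(TableIdentifier(table)).stats
        assert(stats.get.colStats("a1").distinctCount.contains(BigInt(2)))
      }
    }
    ```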