Github user viirya commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20624#discussion_r170499705
  
    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala ---
    @@ -387,6 +390,143 @@ case class CatalogStatistics(
       }
     }
     
    +/**
    + * This class of statistics for a column is used in [[CatalogTable]] to interact with the metastore.
    + */
    +case class CatalogColumnStat(
    +    distinctCount: Option[BigInt] = None,
    +    min: Option[String] = None,
    +    max: Option[String] = None,
    +    nullCount: Option[BigInt] = None,
    +    avgLen: Option[Long] = None,
    +    maxLen: Option[Long] = None,
    +    histogram: Option[Histogram] = None) {
    +
    +  /**
    +   * Returns a map from string to string that can be used to serialize the column stats.
    +   * The key is the name of the column and name of the field (e.g. "colName.distinctCount"),
    +   * and the value is the string representation for the value.
    +   * min/max values are stored as Strings. They can be deserialized using
    +   * [[CatalogColumnStat.fromExternalString]].
    +   *
    +   * As part of the protocol, the returned map always contains a key called "version".
    +   * Any of the fields that are null (None) won't appear in the map.
    +   */
    +  def toMap(colName: String): Map[String, String] = {
    +    val map = new scala.collection.mutable.HashMap[String, String]
    +    map.put(s"${colName}.${CatalogColumnStat.KEY_VERSION}", "1")
    +    distinctCount.foreach { v =>
    +      map.put(s"${colName}.${CatalogColumnStat.KEY_DISTINCT_COUNT}", 
v.toString)
    +    }
    +    nullCount.foreach { v =>
    +      map.put(s"${colName}.${CatalogColumnStat.KEY_NULL_COUNT}", 
v.toString)
    +    }
    +    avgLen.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_AVG_LEN}", v.toString) }
    +    maxLen.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_MAX_LEN}", v.toString) }
    +    min.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_MIN_VALUE}", v) }
    +    max.foreach { v => map.put(s"${colName}.${CatalogColumnStat.KEY_MAX_VALUE}", v) }
    +    histogram.foreach { h =>
    +      map.put(s"${colName}.${CatalogColumnStat.KEY_HISTOGRAM}", 
HistogramSerializer.serialize(h))
    +    }
    +    map.toMap
    +  }
    +
    +  /** Convert [[CatalogColumnStat]] to [[ColumnStat]]. */
    +  def toPlanStat(
    +      colName: String,
    +      dataType: DataType): ColumnStat =
    +    ColumnStat(
    +      distinctCount = distinctCount,
    +      min = min.map(CatalogColumnStat.fromExternalString(_, colName, dataType)),
    +      max = max.map(CatalogColumnStat.fromExternalString(_, colName, dataType)),
    +      nullCount = nullCount,
    +      avgLen = avgLen,
    +      maxLen = maxLen,
    +      histogram = histogram)
    +}
    +
    +object CatalogColumnStat extends Logging {
    +
    +  // List of string keys used to serialize CatalogColumnStat
    +  val KEY_VERSION = "version"
    +  private val KEY_DISTINCT_COUNT = "distinctCount"
    +  private val KEY_MIN_VALUE = "min"
    +  private val KEY_MAX_VALUE = "max"
    +  private val KEY_NULL_COUNT = "nullCount"
    +  private val KEY_AVG_LEN = "avgLen"
    +  private val KEY_MAX_LEN = "maxLen"
    +  private val KEY_HISTOGRAM = "histogram"
    +
    +  /**
    +   * Converts the string representation of a value to the corresponding Catalyst value of
    +   * the given data type.
    +   */
    +  def fromExternalString(s: String, name: String, dataType: DataType): Any = {
    +    dataType match {
    +      case BooleanType => s.toBoolean
    +      case DateType => DateTimeUtils.fromJavaDate(java.sql.Date.valueOf(s))
    +      case TimestampType => DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf(s))
    +      case ByteType => s.toByte
    +      case ShortType => s.toShort
    +      case IntegerType => s.toInt
    +      case LongType => s.toLong
    +      case FloatType => s.toFloat
    +      case DoubleType => s.toDouble
    +      case _: DecimalType => Decimal(s)
    +      // This version of Spark does not use min/max for binary/string types so we ignore it.
    +      case BinaryType | StringType => null
    +      case _ =>
    +        throw new AnalysisException("Column statistics deserialization is not supported for " +
    +          s"column $name of data type: $dataType.")
    +    }
    +  }
    +
    +  /**
    +   * Converts the given value from Catalyst data type to string representation of external
    +   * data type.
    +   */
    +  def toExternalString(v: Any, colName: String, dataType: DataType): String = {
    +    val externalValue = dataType match {
    +      case DateType => DateTimeUtils.toJavaDate(v.asInstanceOf[Int])
    +      case TimestampType => DateTimeUtils.toJavaTimestamp(v.asInstanceOf[Long])
    +      case BooleanType | _: IntegralType | FloatType | DoubleType => v
    +      case _: DecimalType => v.asInstanceOf[Decimal].toJavaBigDecimal
    +      // This version of Spark does not use min/max for binary/string types so we ignore it.
    +      case _ =>
    +        throw new AnalysisException("Column statistics deserialization is not supported for " +
    --- End diff --
    
    `deserialization` -> `serialization`?
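    
    For readers following along, a minimal sketch of the serialization side, assuming this patch is applied (the column name `age` and all values below are illustrative):
    
    ```scala
    import org.apache.spark.sql.catalyst.catalog.CatalogColumnStat
    
    // Stats for a hypothetical INT column "age".
    val stat = CatalogColumnStat(
      distinctCount = Some(BigInt(100)),
      min = Some("1"),
      max = Some("99"),
      nullCount = Some(BigInt(0)),
      avgLen = Some(4L),
      maxLen = Some(4L))
    
    // Keys are prefixed with the column name, "version" is always present,
    // and None fields (here, histogram) simply don't appear.
    val kv: Map[String, String] = stat.toMap("age")
    // Map("age.version" -> "1", "age.distinctCount" -> "100",
    //     "age.min" -> "1", "age.max" -> "99", "age.nullCount" -> "0",
    //     "age.avgLen" -> "4", "age.maxLen" -> "4")
    ```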
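    
    And a sketch of the min/max round-trip through the external string form (again illustrative; `toExternalString` returns a `String` per its signature, though its body is truncated in the quote above):
    
    ```scala
    import org.apache.spark.sql.catalyst.catalog.CatalogColumnStat
    import org.apache.spark.sql.types.{DateType, IntegerType}
    
    // Integral types parse with plain String conversions (here String#toInt).
    val minAge = CatalogColumnStat.fromExternalString("1", "age", IntegerType)       // 1
    
    // Dates parse via java.sql.Date.valueOf into Catalyst's internal Int
    // (days since epoch), and convert back through DateTimeUtils.toJavaDate.
    val minDate = CatalogColumnStat.fromExternalString("2018-02-26", "dt", DateType)
    val external = CatalogColumnStat.toExternalString(minDate, "dt", DateType)       // "2018-02-26"
    ```
    
    Either way, the message thrown in `toExternalString` is on the serialization path, which is why `serialization` reads better there.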

