Github user gatorsmile commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20023#discussion_r161819811
  
    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/types/DecimalType.scala ---
    @@ -136,10 +137,52 @@ object DecimalType extends AbstractDataType {
         case DoubleType => DoubleDecimal
       }
     
    +  private[sql] def forLiteral(literal: Literal): DecimalType = literal.value match {
    +    case v: Short => fromBigDecimal(BigDecimal(v))
    +    case v: Int => fromBigDecimal(BigDecimal(v))
    +    case v: Long => fromBigDecimal(BigDecimal(v))
    +    case _ => forType(literal.dataType)
    +  }
    +
    +  private[sql] def fromBigDecimal(d: BigDecimal): DecimalType = {
    +    DecimalType(Math.max(d.precision, d.scale), d.scale)
    +  }
    +
       private[sql] def bounded(precision: Int, scale: Int): DecimalType = {
         DecimalType(min(precision, MAX_PRECISION), min(scale, MAX_SCALE))
       }
     
    +  /**
    +   * The scale adjustment implementation is based on Hive's, which is itself inspired by
    +   * SQL Server's. In particular, when a result precision is greater than
    +   * {@link #MAX_PRECISION}, the corresponding scale is reduced to prevent the integral part
    +   * of a result from being truncated.
    +   *
    +   * This method is used only when `spark.sql.decimalOperations.allowPrecisionLoss` is set
    +   * to true.
    +   *
    +   * @param precision the precision of the result before any adjustment
    +   * @param scale the scale of the result before any adjustment
    +   * @return the adjusted DecimalType
    +   */
    +  private[sql] def adjustPrecisionScale(precision: Int, scale: Int): DecimalType = {
    --- End diff --
    
    Yeah, this part is consistent.
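
    For readers following along on the list, here is a minimal sketch of the scale adjustment the Javadoc above describes, returning a (precision, scale) pair instead of a DecimalType so the snippet stands alone. It assumes MAX_PRECISION = 38 and a minimum adjusted scale of 6 (the convention Hive uses); the constant name MINIMUM_ADJUSTED_SCALE and the exact body are illustrative, not necessarily identical to this PR:

        // Sketch only: illustrates the "reduce the scale to protect the integral part" rule.
        object AdjustPrecisionScaleSketch {
          val MAX_PRECISION = 38
          // Hypothetical constant; Hive uses 6 as the floor for a reduced scale.
          val MINIMUM_ADJUSTED_SCALE = 6

          def adjustPrecisionScale(precision: Int, scale: Int): (Int, Int) = {
            if (precision <= MAX_PRECISION) {
              // The result already fits: keep the computed precision and scale.
              (precision, scale)
            } else {
              // Overflow: keep all integral digits and give the fractional part
              // whatever room is left, but never drop below the minimum scale
              // (unless the requested scale was already smaller than it).
              val intDigits = precision - scale
              val minScale = math.min(scale, MINIMUM_ADJUSTED_SCALE)
              val adjustedScale = math.max(MAX_PRECISION - intDigits, minScale)
              (MAX_PRECISION, adjustedScale)
            }
          }
        }

    For example, adjustPrecisionScale(40, 10) yields (38, 8): the two digits of overflow are taken from the scale, so all 30 integral digits are preserved.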


