Github user scwf commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14712#discussion_r77014850
  
    --- Diff: 
sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala ---
    @@ -168,6 +169,107 @@ class StatisticsSuite extends QueryTest with 
TestHiveSingleton with SQLTestUtils
           TableIdentifier("tempTable"), ignoreIfNotExists = true, purge = 
false)
       }
     
    +  private def checkMetastoreRelationStats(
    +      tableName: String,
    +      expectedTotalSize: Long,
    +      expectedRowCount: Option[BigInt]): Unit = {
    +    val df = sql(s"SELECT * FROM $tableName")
    +    val relations = df.queryExecution.analyzed.collect { case rel: 
MetastoreRelation =>
    +      assert(rel.statistics.sizeInBytes === expectedTotalSize)
    +      assert(rel.statistics.rowCount === expectedRowCount)
    +      rel
    +    }
    +    assert(relations.size === 1)
    +  }
    +
    +  test("test table-level statistics for hive tables created in 
HiveExternalCatalog") {
    +    val textTable = "textTable"
    +    withTable(textTable) {
    +      sql(s"CREATE TABLE $textTable (key STRING, value STRING) STORED AS 
TEXTFILE")
    +      checkMetastoreRelationStats(textTable,
    +        expectedTotalSize = spark.sessionState.conf.defaultSizeInBytes, 
expectedRowCount = None)
    +
    +      sql(s"INSERT INTO TABLE $textTable SELECT * FROM src")
    +      // don't have our statistics, MetastoreRelation uses hive's 
`totalSize`
    +      checkMetastoreRelationStats(textTable, expectedTotalSize = 5812, 
expectedRowCount = None)
    +
    +      // noscan won't count the number of rows
    +      sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan")
    +      checkMetastoreRelationStats(textTable, expectedTotalSize = 5812, 
expectedRowCount = None)
    +
    +      // without noscan, we count the number of rows
    +      sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS")
    +      checkMetastoreRelationStats(textTable, expectedTotalSize = 5812, 
expectedRowCount = Some(500))
    +    }
    +  }
    +
    +  test("test whether the old stats are removed") {
    +    val textTable = "textTable"
    +    withTable(textTable) {
    +      sql(s"CREATE TABLE $textTable (key STRING, value STRING) STORED AS 
TEXTFILE")
    +      sql(s"INSERT INTO TABLE $textTable SELECT * FROM src")
    +      sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS")
    +      checkMetastoreRelationStats(textTable, expectedTotalSize = 5812, 
expectedRowCount = Some(500))
    +
    +      sql(s"INSERT INTO TABLE $textTable SELECT * FROM src")
    +      sql(s"ANALYZE TABLE $textTable COMPUTE STATISTICS noscan")
    +      // update total size and remove the old and invalid row count
    +      checkMetastoreRelationStats(textTable, expectedTotalSize = 11624, 
expectedRowCount = None)
    +    }
    +  }
    +
    +  private def checkLogicalRelationStats(
    +      tableName: String,
    +      expectedRowCount: Option[BigInt]): Unit = {
    +    val df = sql(s"SELECT * FROM $tableName")
    +    val relations = df.queryExecution.analyzed.collect { case rel: 
LogicalRelation =>
    +      // TODO: We don't have an expected value here because parquet size 
is different on Windows
    +      // and Linux, we need to find the reason and fix this.
    +      assert(rel.statistics.sizeInBytes === rel.relation.sizeInBytes)
    +      assert(rel.statistics.rowCount === expectedRowCount)
    +      rel
    +    }
    +    assert(relations.size === 1)
    +  }
    +
    +  test("test statistics of LogicalRelation inherited from 
MetastoreRelation") {
    +    val parquetTable = "parquetTable"
    +    val orcTable = "orcTable"
    +    withTable(parquetTable, orcTable) {
    +      sql(s"CREATE TABLE $parquetTable (key STRING, value STRING) STORED 
AS PARQUET")
    +      sql(s"CREATE TABLE $orcTable (key STRING, value STRING) STORED AS 
ORC")
    +      sql(s"INSERT INTO TABLE $parquetTable SELECT * FROM src")
    +      sql(s"INSERT INTO TABLE $orcTable SELECT * FROM src")
    +      sql(s"ANALYZE TABLE $parquetTable COMPUTE STATISTICS")
    +      sql(s"ANALYZE TABLE $orcTable COMPUTE STATISTICS")
    +
    +      checkLogicalRelationStats(parquetTable, expectedRowCount = Some(500))
    --- End diff --
    
    For the parquet table, we'd better also set 
`spark.sql.hive.convertMetastoreParquet`, like the orc table below does; this is to 
avoid failures caused by other test cases changing this config.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to