Repository: spark
Updated Branches:
  refs/heads/master 160a54061 -> d5202259d


[SPARK-21127][SQL][FOLLOWUP] fix a config name typo

## What changes were proposed in this pull request?

`spark.sql.statistics.autoUpdate.size` should be 
`spark.sql.statistics.size.autoUpdate.enabled`. The previous name is confusing, 
as users may mistake it for a config that sets a size rather than a flag that 
enables a behavior.

This config exists only in the master branch, so there is no backward 
compatibility issue.
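For illustration, a minimal sketch of toggling the renamed flag in a session 
(assumes an active `SparkSession` named `spark`; not part of the patch):

    // Sketch only: enable automatic size-stat updates under the new key.
    spark.conf.set("spark.sql.statistics.size.autoUpdate.enabled", "true")
    // Equivalent SQL form:
    spark.sql("SET spark.sql.statistics.size.autoUpdate.enabled=true")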

## How was this patch tested?

N/A

Author: Wenchen Fan <wenc...@databricks.com>

Closes #19667 from cloud-fan/minor.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d5202259
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d5202259
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d5202259

Branch: refs/heads/master
Commit: d5202259d9aa9ad95d572af253bf4a722b7b437a
Parents: 160a540
Author: Wenchen Fan <wenc...@databricks.com>
Authored: Tue Nov 7 09:33:52 2017 -0800
Committer: gatorsmile <gatorsm...@gmail.com>
Committed: Tue Nov 7 09:33:52 2017 -0800

----------------------------------------------------------------------
 .../scala/org/apache/spark/sql/internal/SQLConf.scala     |  6 +++---
 .../apache/spark/sql/execution/command/CommandUtils.scala |  2 +-
 .../org/apache/spark/sql/execution/command/ddl.scala      |  2 +-
 .../org/apache/spark/sql/StatisticsCollectionSuite.scala  | 10 +++++-----
 .../scala/org/apache/spark/sql/hive/StatisticsSuite.scala |  6 +++---
 5 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/d5202259/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index ede116e..a04f877 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -812,8 +812,8 @@ object SQLConf {
       .doubleConf
       .createWithDefault(0.05)
 
-  val AUTO_UPDATE_SIZE =
-    buildConf("spark.sql.statistics.autoUpdate.size")
+  val AUTO_SIZE_UPDATE_ENABLED =
+    buildConf("spark.sql.statistics.size.autoUpdate.enabled")
       .doc("Enables automatic update for table size once table's data is 
changed. Note that if " +
         "the total number of files of the table is very large, this can be 
expensive and slow " +
         "down data change commands.")
@@ -1206,7 +1206,7 @@ class SQLConf extends Serializable with Logging {
 
   def cboEnabled: Boolean = getConf(SQLConf.CBO_ENABLED)
 
-  def autoUpdateSize: Boolean = getConf(SQLConf.AUTO_UPDATE_SIZE)
+  def autoSizeUpdateEnabled: Boolean = getConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED)
 
   def joinReorderEnabled: Boolean = getConf(SQLConf.JOIN_REORDER_ENABLED)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d5202259/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
index b22958d..1a0d67f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
@@ -36,7 +36,7 @@ object CommandUtils extends Logging {
   def updateTableStats(sparkSession: SparkSession, table: CatalogTable): Unit = {
     if (table.stats.nonEmpty) {
       val catalog = sparkSession.sessionState.catalog
-      if (sparkSession.sessionState.conf.autoUpdateSize) {
+      if (sparkSession.sessionState.conf.autoSizeUpdateEnabled) {
         val newTable = catalog.getTableMetadata(table.identifier)
        val newSize = CommandUtils.calculateTotalSize(sparkSession.sessionState, newTable)
         val newStats = CatalogStatistics(sizeInBytes = newSize)
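
A hedged usage sketch (table name is hypothetical): with the flag enabled, a 
data-change command that reaches updateTableStats recomputes sizeInBytes, 
provided the table already has stats from a prior ANALYZE:

    // Sketch only; assumes an active SparkSession `spark`.
    spark.sql("SET spark.sql.statistics.size.autoUpdate.enabled=true")
    spark.sql("CREATE TABLE stats_demo (i INT, j STRING) USING PARQUET")
    spark.sql("ANALYZE TABLE stats_demo COMPUTE STATISTICS") // seeds table.stats
    spark.sql("INSERT INTO stats_demo VALUES (1, 'a')")      // size auto-updates
    spark.sql("DESCRIBE TABLE EXTENDED stats_demo").show(truncate = false)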

http://git-wip-us.apache.org/repos/asf/spark/blob/d5202259/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
index a9cd65e..568567a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
@@ -442,7 +442,7 @@ case class AlterTableAddPartitionCommand(
     catalog.createPartitions(table.identifier, parts, ignoreIfExists = ifNotExists)
 
     if (table.stats.nonEmpty) {
-      if (sparkSession.sessionState.conf.autoUpdateSize) {
+      if (sparkSession.sessionState.conf.autoSizeUpdateEnabled) {
         val addedSize = parts.map { part =>
           CommandUtils.calculateLocationSize(sparkSession.sessionState, table.identifier,
             part.storage.locationUri)
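
Correspondingly, a sketch of the ADD PARTITION path (names hypothetical; the 
table is assumed partitioned and previously analyzed):

    // Sketch only: the added partition's location size is folded into the
    // table's sizeInBytes statistic when the flag is on.
    spark.sql("SET spark.sql.statistics.size.autoUpdate.enabled=true")
    spark.sql("ALTER TABLE part_demo ADD PARTITION (ds='2017-11-07')")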

http://git-wip-us.apache.org/repos/asf/spark/blob/d5202259/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
index 2fc92f4..7247c3a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
@@ -216,7 +216,7 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared
   test("change stats after set location command") {
     val table = "change_stats_set_location_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTable(table) {
          spark.range(100).select($"id", $"id" % 5 as "value").write.saveAsTable(table)
           // analyze to get initial stats
@@ -252,7 +252,7 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared
   test("change stats after insert command for datasource table") {
     val table = "change_stats_insert_datasource_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTable(table) {
           sql(s"CREATE TABLE $table (i int, j string) USING PARQUET")
           // analyze to get initial stats
@@ -285,7 +285,7 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared
   test("invalidation of tableRelationCache after inserts") {
     val table = "invalidate_catalog_cache_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTable(table) {
           spark.range(100).write.saveAsTable(table)
           sql(s"ANALYZE TABLE $table COMPUTE STATISTICS")
@@ -302,7 +302,7 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared
   test("invalidation of tableRelationCache after table truncation") {
     val table = "invalidate_catalog_cache_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTable(table) {
           spark.range(100).write.saveAsTable(table)
           sql(s"ANALYZE TABLE $table COMPUTE STATISTICS")
@@ -318,7 +318,7 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared
   test("invalidation of tableRelationCache after alter table add partition") {
     val table = "invalidate_catalog_cache_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTempDir { dir =>
           withTable(table) {
             val path = dir.getCanonicalPath
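
All of the tests touched above share one pattern, condensed here for reference 
(taken directly from the diff, not new behavior):

    // Each test exercises both settings of the flag.
    Seq(false, true).foreach { autoUpdate =>
      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
        // assertions branch on whether the size stat auto-updated
      }
    }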

http://git-wip-us.apache.org/repos/asf/spark/blob/d5202259/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
index b9a5ad7..9e8fc32 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -755,7 +755,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
   test("change stats after insert command for hive table") {
     val table = s"change_stats_insert_hive_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTable(table) {
           sql(s"CREATE TABLE $table (i int, j string)")
           // analyze to get initial stats
@@ -783,7 +783,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
   test("change stats after load data command") {
     val table = "change_stats_load_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTable(table) {
           sql(s"CREATE TABLE $table (i INT, j STRING) STORED AS PARQUET")
           // analyze to get initial stats
@@ -817,7 +817,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
   test("change stats after add/drop partition command") {
     val table = "change_stats_part_table"
     Seq(false, true).foreach { autoUpdate =>
-      withSQLConf(SQLConf.AUTO_UPDATE_SIZE.key -> autoUpdate.toString) {
+      withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) {
         withTable(table) {
           sql(s"CREATE TABLE $table (i INT, j STRING) PARTITIONED BY (ds 
STRING, hr STRING)")
           // table has two partitions initially
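
For the Hive-table cases, a hedged sketch of the LOAD DATA path (table name and 
input path are hypothetical; assumes a prior ANALYZE so table.stats is set):

    // Sketch only: LOAD DATA also routes through updateTableStats, so the
    // size stat refreshes when the flag is enabled.
    spark.sql("SET spark.sql.statistics.size.autoUpdate.enabled=true")
    spark.sql("LOAD DATA LOCAL INPATH '/tmp/part-00000.parquet' INTO TABLE load_demo")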

