Repository: spark
Updated Branches:
  refs/heads/master 5e5d886a2 -> 5bd5e1b9c


[MINOR][SQL] Avoid hardcoded configuration keys in SQLConf's `doc`

## What changes were proposed in this pull request?

This PR proposes to avoid hardcoded configuration keys in SQLConf's `doc`.
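
For reference, a minimal standalone sketch of the pattern applied here (the `ConfEntry` case class and `PatternSketch` object below are simplified, hypothetical stand-ins for SQLConf's internal config builder, not Spark's actual API): instead of repeating a related key as a string literal inside another entry's `doc`, the doc string interpolates that entry's `.key`, so the text cannot drift if the key is ever renamed.

```scala
// Hypothetical, simplified stand-in for a SQLConf-style entry; illustration only.
case class ConfEntry(key: String, doc: String, default: Boolean)

object PatternSketch {
  val PARQUET_FILTER_PUSHDOWN_ENABLED =
    ConfEntry("spark.sql.parquet.filterPushdown", "Enables Parquet filter push-down.", default = true)

  // Before: the related key is hardcoded in the doc string and can go stale after a rename.
  val before = ConfEntry(
    "spark.sql.parquet.filterPushdown.date",
    "If true, enables Parquet filter push-down optimization for Date. " +
      "This configuration only has an effect when 'spark.sql.parquet.filterPushdown' is enabled.",
    default = true)

  // After: interpolate the referenced entry's `.key`, as this patch does throughout SQLConf's `doc`.
  val after = ConfEntry(
    "spark.sql.parquet.filterPushdown.date",
    "If true, enables Parquet filter push-down optimization for Date. " +
      s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is enabled.",
    default = true)

  def main(args: Array[String]): Unit = {
    // Both render the same text today; only `after` stays correct if the key changes.
    assert(before.doc == after.doc)
    println(after.doc)
  }
}
```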

## How was this patch tested?

Manually verified.

Closes #22877 from HyukjinKwon/minor-conf-name.

Authored-by: hyukjinkwon <gurwls...@apache.org>
Signed-off-by: hyukjinkwon <gurwls...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/5bd5e1b9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/5bd5e1b9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/5bd5e1b9

Branch: refs/heads/master
Commit: 5bd5e1b9c84b5f7d4d67ab94e02d49ebdd02f177
Parents: 5e5d886
Author: hyukjinkwon <gurwls...@apache.org>
Authored: Tue Oct 30 07:38:26 2018 +0800
Committer: hyukjinkwon <gurwls...@apache.org>
Committed: Tue Oct 30 07:38:26 2018 +0800

----------------------------------------------------------------------
 .../org/apache/spark/sql/internal/SQLConf.scala | 41 +++++++++++---------
 1 file changed, 23 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/5bd5e1b9/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 4edffce..535ec51 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -408,7 +408,8 @@ object SQLConf {
 
   val PARQUET_FILTER_PUSHDOWN_DATE_ENABLED = buildConf("spark.sql.parquet.filterPushdown.date")
     .doc("If true, enables Parquet filter push-down optimization for Date. " +
-      "This configuration only has an effect when 'spark.sql.parquet.filterPushdown' is enabled.")
+      s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
+      "enabled.")
     .internal()
     .booleanConf
     .createWithDefault(true)
@@ -416,7 +417,7 @@ object SQLConf {
   val PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED =
     buildConf("spark.sql.parquet.filterPushdown.timestamp")
       .doc("If true, enables Parquet filter push-down optimization for 
Timestamp. " +
-        "This configuration only has an effect when 
'spark.sql.parquet.filterPushdown' is " +
+        s"This configuration only has an effect when 
'${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
         "enabled and Timestamp stored as TIMESTAMP_MICROS or TIMESTAMP_MILLIS 
type.")
     .internal()
     .booleanConf
@@ -425,7 +426,8 @@ object SQLConf {
   val PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED =
     buildConf("spark.sql.parquet.filterPushdown.decimal")
       .doc("If true, enables Parquet filter push-down optimization for 
Decimal. " +
-        "This configuration only has an effect when 
'spark.sql.parquet.filterPushdown' is enabled.")
+        s"This configuration only has an effect when 
'${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
+        "enabled.")
       .internal()
       .booleanConf
       .createWithDefault(true)
@@ -433,7 +435,8 @@ object SQLConf {
   val PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED =
     buildConf("spark.sql.parquet.filterPushdown.string.startsWith")
     .doc("If true, enables Parquet filter push-down optimization for string 
startsWith function. " +
-      "This configuration only has an effect when 
'spark.sql.parquet.filterPushdown' is enabled.")
+      s"This configuration only has an effect when 
'${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
+      "enabled.")
     .internal()
     .booleanConf
     .createWithDefault(true)
@@ -444,7 +447,8 @@ object SQLConf {
         "Large threshold won't necessarily provide much better performance. " +
         "The experiment argued that 300 is the limit threshold. " +
         "By setting this value to 0 this feature can be disabled. " +
-        "This configuration only has an effect when 
'spark.sql.parquet.filterPushdown' is enabled.")
+        s"This configuration only has an effect when 
'${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
+        "enabled.")
       .internal()
       .intConf
       .checkValue(threshold => threshold >= 0, "The threshold must not be 
negative.")
@@ -459,14 +463,6 @@ object SQLConf {
     .booleanConf
     .createWithDefault(false)
 
-  val PARQUET_RECORD_FILTER_ENABLED = buildConf("spark.sql.parquet.recordLevelFilter.enabled")
-    .doc("If true, enables Parquet's native record-level filtering using the pushed down " +
-      "filters. This configuration only has an effect when 'spark.sql.parquet.filterPushdown' " +
-      "is enabled and the vectorized reader is not used. You can ensure the vectorized reader " +
-      "is not used by setting 'spark.sql.parquet.enableVectorizedReader' to false.")
-    .booleanConf
-    .createWithDefault(false)
-
   val PARQUET_OUTPUT_COMMITTER_CLASS = buildConf("spark.sql.parquet.output.committer.class")
     .doc("The output committer class used by Parquet. The specified class needs to be a " +
       "subclass of org.apache.hadoop.mapreduce.OutputCommitter. Typically, it's also a subclass " +
@@ -482,6 +478,15 @@ object SQLConf {
       .booleanConf
       .createWithDefault(true)
 
+  val PARQUET_RECORD_FILTER_ENABLED = buildConf("spark.sql.parquet.recordLevelFilter.enabled")
+    .doc("If true, enables Parquet's native record-level filtering using the pushed down " +
+      "filters. " +
+      s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' " +
+      "is enabled and the vectorized reader is not used. You can ensure the vectorized reader " +
+      s"is not used by setting '${PARQUET_VECTORIZED_READER_ENABLED.key}' to false.")
+    .booleanConf
+    .createWithDefault(false)
+
   val PARQUET_VECTORIZED_READER_BATCH_SIZE = buildConf("spark.sql.parquet.columnarReaderBatchSize")
     .doc("The number of rows to include in a parquet vectorized reader batch. The number should " +
       "be carefully chosen to minimize overhead and avoid OOMs in reading data.")
@@ -642,7 +647,7 @@ object SQLConf {
     .internal()
     .doc("When true, a table created by a Hive CTAS statement (no USING 
clause) " +
       "without specifying any storage property will be converted to a data 
source table, " +
-      "using the data source set by spark.sql.sources.default.")
+      s"using the data source set by ${DEFAULT_DATA_SOURCE_NAME.key}.")
     .booleanConf
     .createWithDefault(false)
 
@@ -1108,7 +1113,7 @@ object SQLConf {
   val DEFAULT_SIZE_IN_BYTES = buildConf("spark.sql.defaultSizeInBytes")
     .internal()
     .doc("The default table size used in query planning. By default, it is set 
to Long.MaxValue " +
-      "which is larger than `spark.sql.autoBroadcastJoinThreshold` to be more 
conservative. " +
+      s"which is larger than `${AUTO_BROADCASTJOIN_THRESHOLD.key}` to be more 
conservative. " +
       "That is to say by default the optimizer will not choose to broadcast a 
table unless it " +
       "knows for sure its size is small enough.")
     .longConf
@@ -1279,7 +1284,7 @@ object SQLConf {
 
   val ARROW_FALLBACK_ENABLED =
     buildConf("spark.sql.execution.arrow.fallback.enabled")
-      .doc("When true, optimizations enabled by 
'spark.sql.execution.arrow.enabled' will " +
+      .doc(s"When true, optimizations enabled by 
'${ARROW_EXECUTION_ENABLED.key}' will " +
         "fallback automatically to non-optimized implementations if an error 
occurs.")
       .booleanConf
       .createWithDefault(true)
@@ -1492,7 +1497,7 @@ object SQLConf {
 
   val REPL_EAGER_EVAL_MAX_NUM_ROWS = buildConf("spark.sql.repl.eagerEval.maxNumRows")
     .doc("The max number of rows that are returned by eager evaluation. This only takes " +
-      "effect when spark.sql.repl.eagerEval.enabled is set to true. The valid range of this " +
+      s"effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to true. The valid range of this " +
       "config is from 0 to (Int.MaxValue - 1), so the invalid config like negative and " +
       "greater than (Int.MaxValue - 1) will be normalized to 0 and (Int.MaxValue - 1).")
     .intConf
@@ -1500,7 +1505,7 @@ object SQLConf {
 
   val REPL_EAGER_EVAL_TRUNCATE = buildConf("spark.sql.repl.eagerEval.truncate")
     .doc("The max number of characters for each cell that is returned by eager 
evaluation. " +
-      "This only takes effect when spark.sql.repl.eagerEval.enabled is set to 
true.")
+      s"This only takes effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to 
true.")
     .intConf
     .createWithDefault(20)
 

