Repository: spark
Updated Branches:
  refs/heads/master 425bcf6d6 -> a632bb56f


[SPARK-14208][SQL] Renames spark.sql.parquet.fileScan

## What changes were proposed in this pull request?

Renames the SQL option `spark.sql.parquet.fileScan` to 
`spark.sql.sources.fileScan`, since all `HadoopFsRelation` based data sources 
are now being migrated to the `FileScanRDD` code path.

## How was this patch tested?

None.

Author: Cheng Lian <l...@databricks.com>

Closes #12003 from liancheng/spark-14208-option-renaming.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/a632bb56
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/a632bb56
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/a632bb56

Branch: refs/heads/master
Commit: a632bb56f8867df39a78d7f01fb870f548b09815
Parents: 425bcf6
Author: Cheng Lian <l...@databricks.com>
Authored: Tue Mar 29 20:56:01 2016 +0800
Committer: Cheng Lian <l...@databricks.com>
Committed: Tue Mar 29 20:56:01 2016 +0800

----------------------------------------------------------------------
 .../spark/sql/execution/datasources/FileSourceStrategy.scala | 2 +-
 .../main/scala/org/apache/spark/sql/internal/SQLConf.scala   | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/a632bb56/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
index 76a724e..20fda95 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
@@ -60,7 +60,7 @@ private[sql] object FileSourceStrategy extends Strategy with 
Logging {
          files.fileFormat.isInstanceOf[parquet.DefaultSource] ||
          files.fileFormat.toString == "ORC" ||
          files.fileFormat.isInstanceOf[json.DefaultSource]) &&
-         files.sqlContext.conf.parquetFileScan =>
+         files.sqlContext.conf.useFileScan =>
       // Filters on this relation fall into four categories based on where we 
can use them to avoid
       // reading unneeded data:
       //  - partition keys only - used to prune directories to read

http://git-wip-us.apache.org/repos/asf/spark/blob/a632bb56/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 77af0e0..ca6ba4c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -288,9 +288,9 @@ object SQLConf {
     defaultValue = Some(true),
     doc = "Whether the query analyzer should be case sensitive or not.")
 
-  val PARQUET_FILE_SCAN = booleanConf("spark.sql.parquet.fileScan",
+  val USE_FILE_SCAN = booleanConf("spark.sql.sources.fileScan",
     defaultValue = Some(true),
-    doc = "Use the new FileScanRDD path for reading parquet data.",
+    doc = "Use the new FileScanRDD path for reading HDFS based data sources.",
     isPublic = false)
 
   val PARQUET_SCHEMA_MERGING_ENABLED = 
booleanConf("spark.sql.parquet.mergeSchema",
@@ -583,9 +583,9 @@ class SQLConf extends Serializable with CatalystConf with 
ParserConf with Loggin
 
   def useCompression: Boolean = getConf(COMPRESS_CACHED)
 
-  def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
+  def useFileScan: Boolean = getConf(USE_FILE_SCAN)
 
-  def parquetFileScan: Boolean = getConf(PARQUET_FILE_SCAN)
+  def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
 
   def parquetCacheMetadata: Boolean = getConf(PARQUET_CACHE_METADATA)
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to