Repository: spark
Updated Branches:
  refs/heads/branch-1.1 5f72d7bcf -> 64e136a64


[SPARK-2846][SQL] Add configureInputJobPropertiesForStorageHandler to 
initialization of job conf

Add configureInputJobPropertiesForStorageHandler to initial job conf

Author: Alex Liu <alex_li...@yahoo.com>

Closes #1927 from alexliu68/SPARK-SQL-2846 and squashes the following commits:

e4bdc4c [Alex Liu] SPARK-SQL-2846 add 
configureInputJobPropertiesForStorageHandler to initial job conf

(cherry picked from commit d9e94146a6e65be110a62e3bd0351148912a41d1)
Signed-off-by: Michael Armbrust <mich...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/64e136a6
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/64e136a6
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/64e136a6

Branch: refs/heads/branch-1.1
Commit: 64e136a640a9ccbde74f7c754b375d175f1991d4
Parents: 5f72d7b
Author: Alex Liu <alex_li...@yahoo.com>
Authored: Wed Aug 20 16:14:06 2014 -0700
Committer: Michael Armbrust <mich...@databricks.com>
Committed: Wed Aug 20 16:14:17 2014 -0700

----------------------------------------------------------------------
 .../src/main/scala/org/apache/spark/sql/hive/TableReader.scala    | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/64e136a6/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala 
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
index 82c8828..329f80c 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.{Path, PathFilter}
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants._
 import org.apache.hadoop.hive.ql.exec.Utilities
 import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition, Table 
=> HiveTable}
-import org.apache.hadoop.hive.ql.plan.TableDesc
+import org.apache.hadoop.hive.ql.plan.{PlanUtils, TableDesc}
 import org.apache.hadoop.hive.serde2.Deserializer
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector
 
@@ -249,6 +249,7 @@ private[hive] object HadoopTableReader extends 
HiveInspectors {
   def initializeLocalJobConfFunc(path: String, tableDesc: TableDesc)(jobConf: 
JobConf) {
     FileInputFormat.setInputPaths(jobConf, path)
     if (tableDesc != null) {
+      PlanUtils.configureInputJobPropertiesForStorageHandler(tableDesc)
       Utilities.copyTableJobPropertiesToConf(tableDesc, jobConf)
     }
     val bufferSize = System.getProperty("spark.buffer.size", "65536")


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org

Reply via email to