Repository: spark
Updated Branches:
  refs/heads/master 0867b23c7 -> 49b1504fe


Revert "[SPARK-9228] [SQL] use tungsten.enabled in public for both of codegen/unsafe"

This reverts commit 4e70e8256ce2f45b438642372329eac7b1e9e8cf.
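
Note (editor's sketch, not part of the commit): with this revert applied, runtime code
generation and the Tungsten unsafe backend are again controlled by their own public
options instead of the single spark.sql.tungsten.enabled switch that the reverted patch
introduced. A minimal usage sketch, assuming a running SQLContext named sqlContext:

    // Illustrative only; both options default to true after this revert.
    sqlContext.setConf("spark.sql.codegen", "false")         // turn off runtime code generation
    sqlContext.setConf("spark.sql.unsafe.enabled", "false")  // turn off the unsafe Tungsten backend
    // The single switch removed by this revert was:
    // sqlContext.setConf("spark.sql.tungsten.enabled", "false")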


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/49b1504f
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/49b1504f
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/49b1504f

Branch: refs/heads/master
Commit: 49b1504fe3733eb36a7fc6317ec19aeba5d46f97
Parents: 0867b23
Author: Davies Liu <davies....@gmail.com>
Authored: Thu Aug 6 17:36:12 2015 -0700
Committer: Davies Liu <davies....@gmail.com>
Committed: Thu Aug 6 17:36:12 2015 -0700

----------------------------------------------------------------------
 docs/sql-programming-guide.md                   |  6 +++---
 .../scala/org/apache/spark/sql/SQLConf.scala    | 20 +++++++-------------
 .../apache/spark/sql/execution/SparkPlan.scala  |  8 +-------
 .../spark/sql/execution/joins/HashJoin.scala    |  3 +--
 .../sql/execution/joins/HashOuterJoin.scala     |  2 +-
 .../sql/execution/joins/HashSemiJoin.scala      |  3 +--
 6 files changed, 14 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/49b1504f/docs/sql-programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 6c31717..3ea77e8 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -1884,11 +1884,11 @@ that these options will be deprecated in future release as more optimizations ar
     </td>
   </tr>
   <tr>
-    <td><code>spark.sql.tungsten.enabled</code></td>
+    <td><code>spark.sql.codegen</code></td>
     <td>true</td>
     <td>
-      When true, use the optimized Tungsten physical execution backend which explicitly manages memory
-      and dynamically generates bytecode for expression evaluation.
+      When true, code will be dynamically generated at runtime for expression evaluation in a specific
+      query. For some queries with complicated expression this option can lead to significant speed-ups.
     </td>
   </tr>
   <tr>

http://git-wip-us.apache.org/repos/asf/spark/blob/49b1504f/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
index ef35c13..f836122 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
@@ -223,21 +223,14 @@ private[spark] object SQLConf {
     defaultValue = Some(200),
     doc = "The default number of partitions to use when shuffling data for 
joins or aggregations.")
 
-  val TUNGSTEN_ENABLED = booleanConf("spark.sql.tungsten.enabled",
-    defaultValue = Some(true),
-    doc = "When true, use the optimized Tungsten physical execution backend 
which explicitly " +
-          "manages memory and dynamically generates bytecode for expression 
evaluation.")
-
   val CODEGEN_ENABLED = booleanConf("spark.sql.codegen",
-    defaultValue = Some(true),  // use TUNGSTEN_ENABLED as default
+    defaultValue = Some(true),
     doc = "When true, code will be dynamically generated at runtime for 
expression evaluation in" +
-      " a specific query.",
-    isPublic = false)
+      " a specific query.")
 
   val UNSAFE_ENABLED = booleanConf("spark.sql.unsafe.enabled",
-    defaultValue = Some(true),  // use TUNGSTEN_ENABLED as default
-    doc = "When true, use the new optimized Tungsten physical execution 
backend.",
-    isPublic = false)
+    defaultValue = Some(true),
+    doc = "When true, use the new optimized Tungsten physical execution 
backend.")
 
   val DIALECT = stringConf(
     "spark.sql.dialect",
@@ -434,6 +427,7 @@ private[spark] object SQLConf {
  *
  * SQLConf is thread-safe (internally synchronized, so safe to be used in multiple threads).
  */
+
 private[sql] class SQLConf extends Serializable with CatalystConf {
   import SQLConf._
 
@@ -480,11 +474,11 @@ private[sql] class SQLConf extends Serializable with CatalystConf {
 
   private[spark] def sortMergeJoinEnabled: Boolean = getConf(SORTMERGE_JOIN)
 
-  private[spark] def codegenEnabled: Boolean = getConf(CODEGEN_ENABLED, getConf(TUNGSTEN_ENABLED))
+  private[spark] def codegenEnabled: Boolean = getConf(CODEGEN_ENABLED)
 
   def caseSensitiveAnalysis: Boolean = getConf(SQLConf.CASE_SENSITIVE)
 
-  private[spark] def unsafeEnabled: Boolean = getConf(UNSAFE_ENABLED, getConf(TUNGSTEN_ENABLED))
+  private[spark] def unsafeEnabled: Boolean = getConf(UNSAFE_ENABLED)
 
   private[spark] def useSqlAggregate2: Boolean = getConf(USE_SQL_AGGREGATE2)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/49b1504f/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
index 3fff79c..2f29067 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala
@@ -55,18 +55,12 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializ
   protected def sparkContext = sqlContext.sparkContext
 
   // sqlContext will be null when we are being deserialized on the slaves.  In this instance
-  // the value of codegenEnabled/unsafeEnabled will be set by the desserializer after the
-  // constructor has run.
+  // the value of codegenEnabled will be set by the desserializer after the constructor has run.
   val codegenEnabled: Boolean = if (sqlContext != null) {
     sqlContext.conf.codegenEnabled
   } else {
     false
   }
-  val unsafeEnabled: Boolean = if (sqlContext != null) {
-    sqlContext.conf.unsafeEnabled
-  } else {
-    false
-  }
 
   /**
    * Whether the "prepare" method is called.
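
Editor's note: the SparkPlan hunk above keeps the pattern of reading codegenEnabled eagerly into
a val, because sqlContext is null once the plan is deserialized on an executor. A minimal sketch
of that pattern using plain Java serialization (FakeContext and PlanLike are hypothetical names;
Spark may use a different serializer, but the idea of capturing the flag is the same):

    import java.io._

    object CapturedFlagSketch {
      class FakeContext { val codegenEnabled = true }   // stand-in for SQLContext

      // The context is transient, so only the captured Boolean travels with the plan.
      class PlanLike(@transient private val ctx: FakeContext) extends Serializable {
        val codegenEnabled: Boolean = if (ctx != null) ctx.codegenEnabled else false
      }

      def main(args: Array[String]): Unit = {
        val buf = new ByteArrayOutputStream()
        val out = new ObjectOutputStream(buf)
        out.writeObject(new PlanLike(new FakeContext))
        out.close()
        val copy = new ObjectInputStream(new ByteArrayInputStream(buf.toByteArray))
          .readObject().asInstanceOf[PlanLike]
        println(copy.codegenEnabled)   // true: the value captured before serialization survives
      }
    }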

http://git-wip-us.apache.org/repos/asf/spark/blob/49b1504f/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala
index 22d46d1..5e9cd9f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoin.scala
@@ -44,8 +44,7 @@ trait HashJoin {
   override def output: Seq[Attribute] = left.output ++ right.output
 
   protected[this] def isUnsafeMode: Boolean = {
-    (self.codegenEnabled && self.unsafeEnabled
-      && UnsafeProjection.canSupport(buildKeys)
+    (self.codegenEnabled && UnsafeProjection.canSupport(buildKeys)
       && UnsafeProjection.canSupport(self.schema))
   }
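
Editor's note: read as a standalone predicate (a sketch only, not the trait's code), the
post-revert gate for using the unsafe hash join is:

    // Sketch: self.unsafeEnabled is no longer part of the condition once this revert is applied.
    def unsafeHashJoinAllowed(codegenEnabled: Boolean,
                              buildKeysSupported: Boolean,  // i.e. UnsafeProjection.canSupport(buildKeys)
                              schemaSupported: Boolean      // i.e. UnsafeProjection.canSupport(self.schema)
                             ): Boolean =
      codegenEnabled && buildKeysSupported && schemaSupported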
 

http://git-wip-us.apache.org/repos/asf/spark/blob/49b1504f/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashOuterJoin.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashOuterJoin.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashOuterJoin.scala
index 701bd3c..346337e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashOuterJoin.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashOuterJoin.scala
@@ -67,7 +67,7 @@ trait HashOuterJoin {
   }
 
   protected[this] def isUnsafeMode: Boolean = {
-    (self.codegenEnabled && self.unsafeEnabled && joinType != FullOuter
+    (self.codegenEnabled && joinType != FullOuter
       && UnsafeProjection.canSupport(buildKeys)
       && UnsafeProjection.canSupport(self.schema))
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/49b1504f/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashSemiJoin.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashSemiJoin.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashSemiJoin.scala
index 82dd6eb..47a7d37 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashSemiJoin.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashSemiJoin.scala
@@ -33,8 +33,7 @@ trait HashSemiJoin {
   override def output: Seq[Attribute] = left.output
 
   protected[this] def supportUnsafe: Boolean = {
-    (self.codegenEnabled && self.unsafeEnabled
-      && UnsafeProjection.canSupport(leftKeys)
+    (self.codegenEnabled && UnsafeProjection.canSupport(leftKeys)
       && UnsafeProjection.canSupport(rightKeys)
       && UnsafeProjection.canSupport(left.schema)
       && UnsafeProjection.canSupport(right.schema))

