Repository: incubator-zeppelin
Updated Branches:
  refs/heads/master 28a643385 -> 95dbdc7aa


Get useHiveContext property from environment variable

Author: Mina Lee <[email protected]>

Closes #87 from minahlee/add/usehivecontext_env_var and squashes the following commits:

8bc0698 [Mina Lee] Get interpreter properties from environment variable
e4054c1 [Mina Lee] Get useHiveContext property from environment variable


Project: http://git-wip-us.apache.org/repos/asf/incubator-zeppelin/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-zeppelin/commit/95dbdc7a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-zeppelin/tree/95dbdc7a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-zeppelin/diff/95dbdc7a

Branch: refs/heads/master
Commit: 95dbdc7aa7768bd0f681ebda1d004b5b6aee7a6a
Parents: 28a6433
Author: Mina Lee <[email protected]>
Authored: Tue Jun 2 18:20:17 2015 +0900
Committer: Lee moon soo <[email protected]>
Committed: Mon Jun 8 09:15:19 2015 -0700

----------------------------------------------------------------------
 conf/zeppelin-env.sh.template                             | 10 +++++++---
 .../java/org/apache/zeppelin/spark/SparkInterpreter.java  | 10 +++++++---
 .../org/apache/zeppelin/spark/SparkSqlInterpreter.java    |  9 +++++++--
 3 files changed, 21 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-zeppelin/blob/95dbdc7a/conf/zeppelin-env.sh.template
----------------------------------------------------------------------
diff --git a/conf/zeppelin-env.sh.template b/conf/zeppelin-env.sh.template
index 62aa95a..43d302b 100644
--- a/conf/zeppelin-env.sh.template
+++ b/conf/zeppelin-env.sh.template
@@ -17,18 +17,22 @@
 #
 
 # export JAVA_HOME=
-# export MASTER=                 # Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode
+# export MASTER=                 # Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode.
 # export ZEPPELIN_JAVA_OPTS      # Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g -Dspark.cores.max=16"
 # export ZEPPELIN_MEM            # Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m
-# export ZEPPELIN_INTP_MEM       # zeppelin interpreter process jvm mem options. Defualt = ZEPPELIN_MEM
+# export ZEPPELIN_INTP_MEM       # zeppelin interpreter process jvm mem options. Default = ZEPPELIN_MEM
 # export ZEPPELIN_INTP_JAVA_OPTS # zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS
 
 # export ZEPPELIN_LOG_DIR        # Where log files are stored.  PWD by default.
 # export ZEPPELIN_PID_DIR        # The pid files are stored. /tmp by default.
 # export ZEPPELIN_NOTEBOOK_DIR   # Where notebook saved
-# export ZEPPELIN_IDENT_STRING   # A string representing this instance of zeppelin. $USER by default
+# export ZEPPELIN_IDENT_STRING   # A string representing this instance of zeppelin. $USER by default.
 # export ZEPPELIN_NICENESS       # The scheduling priority for daemons. Defaults to 0.
 
+# export ZEPPELIN_SPARK_USEHIVECONTEXT    # Use HiveContext instead of SQLContext if set true. true by default.
+# export ZEPPELIN_SPARK_CONCURRENTSQL     # Execute multiple SQL concurrently if set true. false by default.
+# export ZEPPELIN_SPARK_MAXRESULT         # Max number of SparkSQL result to display. 1000 by default.
+
 # Options read in YARN client mode
 # export HADOOP_CONF_DIR         # yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.
 

http://git-wip-us.apache.org/repos/asf/incubator-zeppelin/blob/95dbdc7a/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java
----------------------------------------------------------------------
diff --git a/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java b/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java
index 731068e..935b2a5 100644
--- a/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java
+++ b/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java
@@ -105,9 +105,13 @@ public class SparkInterpreter extends Interpreter {
                 getSystemDefault("SPARK_YARN_JAR", "spark.yarn.jar", ""),
                 "The location of the Spark jar file. If you use yarn as a 
cluster, "
                 + "we should set this value")
-            .add("zeppelin.spark.useHiveContext", "true",
-                 "Use HiveContext instead of SQLContext if it is true.")
-            .add("zeppelin.spark.maxResult", "1000", "Max number of SparkSQL result to display.")
+            .add("zeppelin.spark.useHiveContext",
+                getSystemDefault("ZEPPELIN_SPARK_USEHIVECONTEXT", 
+                    "zeppelin.spark.useHiveContext", "true"),
+                "Use HiveContext instead of SQLContext if it is true.")
+            .add("zeppelin.spark.maxResult",
+                getSystemDefault("ZEPPELIN_SPARK_MAXRESULT", "zeppelin.spark.maxResult", "1000"),
+                "Max number of SparkSQL result to display.")
             .add("args", "", "spark commandline args").build());
 
   }

http://git-wip-us.apache.org/repos/asf/incubator-zeppelin/blob/95dbdc7a/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
----------------------------------------------------------------------
diff --git a/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java b/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
index 3138fbc..6e30f1f 100644
--- a/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
+++ b/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
@@ -66,8 +66,13 @@ public class SparkSqlInterpreter extends Interpreter {
         "spark",
         SparkSqlInterpreter.class.getName(),
         new InterpreterPropertyBuilder()
-            .add("zeppelin.spark.maxResult", "10000", "Max number of SparkSQL result to display.")
-            .add("zeppelin.spark.concurrentSQL", "false",
+            .add("zeppelin.spark.maxResult",
+                SparkInterpreter.getSystemDefault("ZEPPELIN_SPARK_MAXRESULT",
+                    "zeppelin.spark.maxResult", "1000"),
+                "Max number of SparkSQL result to display.")
+            .add("zeppelin.spark.concurrentSQL",
+                SparkInterpreter.getSystemDefault("ZEPPELIN_SPARK_CONCURRENTSQL",
+                    "zeppelin.spark.concurrentSQL", "false"),
                 "Execute multiple SQL concurrently if set true.")
             .build());
   }

Reply via email to