cat ../hadoop/spark-install/conf/spark-env.sh
export SCALA_HOME=/home/hadoop/scala-install
export SPARK_WORKER_MEMORY=83971m
export SPARK_MASTER_IP=spark-m
export SPARK_DAEMON_MEMORY=15744m
export SPARK_WORKER_DIR=/hadoop/spark/work
export SPARK_LOCAL_DIRS=/hadoop/spark/tmp
export SPARK_LOG_DIR=/hadoop/spark/logs
export SPARK_CLASSPATH=$SPARK_CLASSPATH:/home/hadoop/hadoop-install/lib/gcs-connector-1.3.2-hadoop1.jar
export MASTER=spark://spark-m:7077
poiuytrez@spark-m:~$ cat ../hadoop/spark-install/conf/spark-defaults.conf
spark.master spark://spark-m:7077
spark.eventLog.enabled true
spark.eventLog.dir gs://xxxx-spark/spark-eventlog-base/spark-m
spark.executor.memory 83971m
spark.yarn.executor.memoryOverhead 83971m

I am using spark-submit.

--
View this message in context: http://apache-spark-user-list.1001560.n3.nabble.com/OutOfMemoryError-with-ramdom-forest-and-small-training-dataset-tp21598p21605.html
Sent from the Apache Spark User List mailing list archive at Nabble.com.

---------------------------------------------------------------------
To unsubscribe, e-mail: user-unsubscr...@spark.apache.org
For additional commands, e-mail: user-h...@spark.apache.org