Confirming that I am also hitting the same errors.

host: r3.8xlarge

Configuration:

spark.serializer org.apache.spark.serializer.KryoSerializer     
spark.driver.memory 200g                                                        
spark.serializer.objectStreamReset 10                                           
spark.local.dir /mnt/rohanp-data2/                                              
spark.driver.maxResultSize 0

Error:

15/11/14 08:03:49 INFO BlockManagerInfo: Added broadcast_5_piece32 in memory
on localhost:56741 (size: 1728.1 KB, free: 103.1 GB)                   
[29/1049]
15/11/14 08:03:49 INFO SparkContext: Created broadcast 5 from broadcast at
Word2Vec.scala:286
Exception in thread "main" java.lang.OutOfMemoryError
        at
java.io.ByteArrayOutputStream.hugeCapacity(ByteArrayOutputStream.java:123)
        at
java.io.ByteArrayOutputStream.grow(ByteArrayOutputStream.java:117)
        at
java.io.ByteArrayOutputStream.ensureCapacity(ByteArrayOutputStream.java:93)
        at
java.io.ByteArrayOutputStream.write(ByteArrayOutputStream.java:153)
        at
java.io.ObjectOutputStream$BlockDataOutputStream.drain(ObjectOutputStream.java:1876)
        at
java.io.ObjectOutputStream$BlockDataOutputStream.setBlockDataMode(ObjectOutputStream.java:1785)
        at
java.io.ObjectOutputStream.writeObject0(ObjectOutputStream.java:1188)
        at
java.io.ObjectOutputStream.writeObject(ObjectOutputStream.java:347)
        at
org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:44)
        at
org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:84)
        at
org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:301)
        at
org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:294)
        at
org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:122)
        at org.apache.spark.SparkContext.clean(SparkContext.scala:2032)
        at
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1.apply(RDD.scala:707)
        at
org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1.apply(RDD.scala:706)
        at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
        at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:310)
        at org.apache.spark.rdd.RDD.mapPartitions(RDD.scala:706)
        at org.apache.spark.mllib.feature.Word2Vec.fit(Word2Vec.scala:288)
        at Word2VecApp$.main(Word2VecApp.scala:13)
        at Word2VecApp.main(Word2VecApp.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at
org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:674)
        at
org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:180)
        at
org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:205)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:120)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
15/11/14 08:04:21 INFO SparkContext: Invoking stop() from shutdown hook
15/11/14 08:04:21 INFO SparkUI: Stopped Spark web UI at
http://10.144.64.249:4040
15/11/14 08:04:21 INFO DAGScheduler: Stopping DAGScheduler

Code:

import org.apache.spark._
import org.apache.spark.rdd._
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.feature.{Word2Vec, Word2VecModel}
import org.apache.spark.SparkConf

/**
 * Trains a Word2Vec model on a whitespace-tokenized text corpus and prints
 * the 40 nearest synonyms of the word "china" with their cosine similarities.
 *
 * Fixes over the original:
 *  - `sc.stop()` is now guaranteed via try/finally, so the SparkContext is
 *    shut down cleanly even when `fit` fails (e.g. the OOM in this report).
 *  - Consistent indentation and explicit `: Unit` on `main`.
 */
object Word2VecApp {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Word2Vec Application")
    val sc = new SparkContext(conf)
    try {
      // Each line becomes a Seq[String] of whitespace-separated tokens,
      // the input shape expected by Word2Vec.fit.
      val input = sc
        .textFile("/mnt/rohanp-data1/data/training_queries.txt")
        .map(line => line.split(" ").toSeq)

      val word2vec = new Word2Vec()
      val model = word2vec.fit(input)

      // findSynonyms returns Array[(String, Double)] sorted by similarity.
      val synonyms = model.findSynonyms("china", 40)
      for ((synonym, cosineSimilarity) <- synonyms) {
        println(s"$synonym $cosineSimilarity")
      }
    } finally {
      // Always release cluster resources, even on failure.
      sc.stop()
    }
  }
}

Start command:
../spark-submit   --class "Word2VecApp"   --master local[30]  
target/scala-2.10/word2vec-project_2.10-1.0.jar



--
View this message in context: 
http://apache-spark-user-list.1001560.n3.nabble.com/Spark-ClosureCleaner-or-java-serializer-OOM-when-trying-to-grow-tp24796p25383.html
Sent from the Apache Spark User List mailing list archive at Nabble.com.

---------------------------------------------------------------------
To unsubscribe, e-mail: user-unsubscr...@spark.apache.org
For additional commands, e-mail: user-h...@spark.apache.org

Reply via email to