[ https://issues.apache.org/jira/browse/SPARK-2891?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Davies Liu resolved SPARK-2891.
-------------------------------
    Resolution: Duplicate
 Fix Version/s: 1.1.0

Duplicate of SPARK-2898.

> Daemon failed to launch worker
> ------------------------------
>
>                 Key: SPARK-2891
>                 URL: https://issues.apache.org/jira/browse/SPARK-2891
>             Project: Spark
>          Issue Type: Bug
>          Components: PySpark
>            Reporter: Davies Liu
>            Priority: Critical
>             Fix For: 1.1.0
>
>
> daviesliu@dm:~/work/spark-perf$ /Users/daviesliu/work/spark/bin/spark-submit --master spark://dm:7077 pyspark-tests/tests.py SchedulerThroughputTest --num-tasks=10000 --num-trials=4 --inter-trial-wait=1
> 14/08/06 17:58:04 WARN JettyUtils: Failed to create UI on port 4040. Trying again on port 4041. - Failure(java.net.BindException: Address already in use)
> Daemon failed to fork PySpark worker: [Errno 35] Resource temporarily unavailable
> 14/08/06 17:59:25 ERROR Executor: Exception in task 9777.0 in stage 1.0 (TID 19777)
> java.lang.IllegalStateException: Python daemon failed to launch worker
>     at org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:71)
>     at org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
>     at org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
>     at org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
>     at org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
>     at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
>     at org.apache.spark.scheduler.Task.run(Task.scala:54)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>     at java.lang.Thread.run(Thread.java:745)
> Daemon failed to fork PySpark worker: [Errno 35] Resource temporarily unavailable
> 14/08/06 17:59:25 ERROR Executor: Exception in task 9781.0 in stage 1.0 (TID 19781)
> java.lang.IllegalStateException: Python daemon failed to launch worker
>     at org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:71)
>     at org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
>     at org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
>     at org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
>     at org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
>     at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
>     at org.apache.spark.scheduler.Task.run(Task.scala:54)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>     at java.lang.Thread.run(Thread.java:745)
> 14/08/06 17:59:25 WARN TaskSetManager: Lost task 9777.0 in stage 1.0 (TID 19777, localhost): java.lang.IllegalStateException: Python daemon failed to launch worker
>     org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:71)
>     org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
>     org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
>     org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
>     org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
>     org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
>     org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
>     org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
>     org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
>     org.apache.spark.scheduler.Task.run(Task.scala:54)
>     org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
>     java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>     java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>     java.lang.Thread.run(Thread.java:745)
> 14/08/06 17:59:25 ERROR TaskSetManager: Task 9777 in stage 1.0 failed 1 times; aborting job
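For context: "[Errno 35] Resource temporarily unavailable" is EAGAIN on OS X, which fork(2) returns when the per-user process limit (RLIMIT_NPROC, i.e. `ulimit -u`) is exhausted, so the Python daemon cannot spawn workers as fast as the 10000-task throughput test demands. The sketch below is illustrative only, not the actual change tracked in SPARK-2898; it shows one way a daemon could tolerate a transient EAGAIN by retrying fork() with backoff (the helper name fork_with_retry is hypothetical):

{code:python}
import errno
import os
import resource
import time

def fork_with_retry(max_attempts=5, initial_backoff=0.1):
    """Fork, retrying with exponential backoff when the OS is
    temporarily out of process slots (OSError with errno EAGAIN).
    Illustrative sketch only, not Spark's daemon.py."""
    backoff = initial_backoff
    for attempt in range(max_attempts):
        try:
            return os.fork()
        except OSError as e:
            # Errno 35 on OS X/BSD is EAGAIN: the per-user process
            # limit (RLIMIT_NPROC, cf. `ulimit -u`) has been hit.
            if e.errno != errno.EAGAIN or attempt == max_attempts - 1:
                raise
            time.sleep(backoff)
            backoff *= 2

if __name__ == "__main__":
    # Show the process ceiling that fork() is running into.
    soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    print("max user processes: soft=%s hard=%s" % (soft, hard))
    pid = fork_with_retry()
    if pid == 0:
        os._exit(0)      # child: exit immediately
    os.waitpid(pid, 0)   # parent: reap the child so it doesn't linger
{code}

Either way, raising the per-user process limit (ulimit -u) before running the test, or slowing the rate of worker creation, are plausible workarounds on the reporter's setup.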