[ https://issues.apache.org/jira/browse/SPARK-11138?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15432209#comment-15432209 ]

Ming Tang commented on SPARK-11138:
-----------------------------------

I got the same error. Here is my unit-tests.log:

..E..........................................................................s............Ivy Default Cache set to: /home/jenkins/.ivy2/cache
The jars for the packages stored in: /home/jenkins/.ivy2/jars
file:/tmp/tmp22Y47d added as a remote repository with the name: repo-1
:: loading settings :: url = jar:file:/home/jenkins/.jenkins/workspace/V100R002C00_Spark_Core_UT/assembly/target/scala-2.10/spark-assembly-1.3.0-hadoop2.7.1.jar!/org/apache/ivy/core/settings/ivysettings.xml
a#mylib added as a dependency
:: resolving dependencies :: org.apache.spark#spark-submit-parent;1.0
        confs: [default]
        found a#mylib;0.1 in repo-1
:: resolution report :: resolve 318ms :: artifacts dl 7ms
        :: modules in use:
        a#mylib;0.1 from repo-1 in [default]
        ---------------------------------------------------------------------
        |                  |            modules            ||   artifacts   |
        |       conf       | number| search|dwnlded|evicted|| number|dwnlded|
        ---------------------------------------------------------------------
        |      default     |   1   |   0   |   0   |   0   ||   1   |   0   |
        ---------------------------------------------------------------------
:: retrieving :: org.apache.spark#spark-submit-parent
        confs: [default]
        0 artifacts copied, 1 already retrieved (0kB/9ms)
.Ivy Default Cache set to: /home/jenkins/.ivy2/cache
The jars for the packages stored in: /home/jenkins/.ivy2/jars
file:/tmp/tmpW_7uje added as a remote repository with the name: repo-1
:: loading settings :: url = jar:file:/home/jenkins/.jenkins/workspace/V100R002C00_Spark_Core_UT/assembly/target/scala-2.10/spark-assembly-1.3.0-hadoop2.7.1.jar!/org/apache/ivy/core/settings/ivysettings.xml
a#mylib added as a dependency
:: resolving dependencies :: org.apache.spark#spark-submit-parent;1.0
        confs: [default]
        found a#mylib;0.1 in repo-1
:: resolution report :: resolve 249ms :: artifacts dl 5ms
        :: modules in use:
        a#mylib;0.1 from repo-1 in [default]
        ---------------------------------------------------------------------
        |                  |            modules            ||   artifacts   |
        |       conf       | number| search|dwnlded|evicted|| number|dwnlded|
        ---------------------------------------------------------------------
        |      default     |   1   |   0   |   0   |   0   ||   1   |   0   |
        ---------------------------------------------------------------------
:: retrieving :: org.apache.spark#spark-submit-parent
        confs: [default]
        0 artifacts copied, 1 already retrieved (0kB/5ms)
..........
======================================================================
ERROR: test_add_py_file (__main__.AddFileTests)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/home/jenkins/.jenkins/workspace/V100R002C00_Spark_Core_UT/python/pyspark/tests.py", line 402, in test_add_py_file
    res = self.sc.parallelize(range(2)).map(func).first()
  File "pyspark/rdd.py", line 1301, in first
    rs = self.take(1)
  File "pyspark/rdd.py", line 1283, in take
    res = self.context.runJob(self, takeUpToNumLeft, p, True)
  File "pyspark/context.py", line 897, in runJob
    allowLocal)
  File "/home/jenkins/.jenkins/workspace/V100R002C00_Spark_Core_UT/python/lib/py4j-0.8.2.1-src.zip/py4j/java_gateway.py", line 538, in __call__
    self.target_id, self.name)
  File "/home/jenkins/.jenkins/workspace/V100R002C00_Spark_Core_UT/python/lib/py4j-0.8.2.1-src.zip/py4j/protocol.py", line 300, in get_return_value
    format(target_id, '.', name), value)
py4j.protocol.Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.runJob.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 3.0 failed 1 times, most recent failure: Lost task 2.0 in stage 3.0 (TID 7, localhost): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "pyspark/worker.py", line 111, in main
    process()
  File "pyspark/worker.py", line 106, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "pyspark/serializers.py", line 263, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "pyspark/rdd.py", line 1279, in takeUpToNumLeft
    yield next(iterator)
  File "/home/jenkins/.jenkins/workspace/V100R002C00_Spark_Core_UT/python/pyspark/tests.py", line 394, in func
    from userlibrary import UserClass
ImportError: cannot import name UserClass

        at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:167)
        at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:208)
        at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:126)
        at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:71)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:63)
        at org.apache.spark.scheduler.Task.run(Task.scala:70)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:222)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1285)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1276)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1275)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1275)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:730)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:730)
        at scala.Option.foreach(Option.scala:236)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:730)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1469)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1430)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)


----------------------------------------------------------------------
Ran 101 tests in 131.779s

FAILED (errors=1, skipped=1)
NOTE: Skipping SciPy tests as it does not seem to be installed
   Random listing order was used

Command exited with non-zero status 1
92.22user 4.84system 2:12.50elapsed 73%CPU (0avgtext+0avgdata 340784maxresident)k
1072inputs+92312outputs (0major+727455minor)pagefaults 0swaps
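
For reference, the failing call chain in the log boils down to roughly this pattern (a minimal sketch reconstructed from the traceback above; the SparkContext setup and the userlibrary.py path are assumptions for illustration, not the exact test source):

{noformat}
from pyspark import SparkContext

sc = SparkContext("local[4]", "add-py-file-repro")

# Ship a module to the executors; userlibrary.py defines UserClass
# (path assumed here for illustration).
sc.addPyFile("python/test_support/userlibrary.py")

def func(x):
    # The import runs on the worker. The flaky failure is this line
    # intermittently raising "ImportError: cannot import name UserClass".
    from userlibrary import UserClass
    return x

res = sc.parallelize(range(2)).map(func).first()
{noformat}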


> Flaky pyspark test: test_add_py_file
> ------------------------------------
>
>                 Key: SPARK-11138
>                 URL: https://issues.apache.org/jira/browse/SPARK-11138
>             Project: Spark
>          Issue Type: Bug
>          Components: Tests
>    Affects Versions: 1.6.0
>            Reporter: Marcelo Vanzin
>              Labels: flaky-test
>
> This test fails pretty often when running PR tests. For example:
> https://amplab.cs.berkeley.edu/jenkins/job/SparkPullRequestBuilder/43800/console
> {noformat}
> ======================================================================
> ERROR: test_add_py_file (__main__.AddFileTests)
> ----------------------------------------------------------------------
> Traceback (most recent call last):
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/tests.py", line 396, in test_add_py_file
>     res = self.sc.parallelize(range(2)).map(func).first()
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/rdd.py", line 1315, in first
>     rs = self.take(1)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/rdd.py", line 1297, in take
>     res = self.context.runJob(self, takeUpToNumLeft, p)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/context.py", line 923, in runJob
>     port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/py4j-0.8.2.1-src.zip/py4j/java_gateway.py", line 538, in __call__
>     self.target_id, self.name)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/py4j-0.8.2.1-src.zip/py4j/protocol.py", line 300, in get_return_value
>     format(target_id, '.', name), value)
> Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.runJob.
> : org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 3.0 failed 1 times, most recent failure: Lost task 2.0 in stage 3.0 (TID 7, localhost): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/pyspark.zip/pyspark/worker.py", line 111, in main
>     process()
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/pyspark.zip/pyspark/worker.py", line 106, in process
>     serializer.dump_stream(func(split_index, iterator), outfile)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
>     vs = list(itertools.islice(iterator, batch))
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/rdd.py", line 1293, in takeUpToNumLeft
>     yield next(iterator)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/tests.py", line 388, in func
>     from userlibrary import UserClass
> ImportError: cannot import name UserClass
>       at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
>       at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)
>       at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)
>       at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)
>       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
>       at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
>       at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
>       at org.apache.spark.scheduler.Task.run(Task.scala:88)
>       at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
>       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>       at java.lang.Thread.run(Thread.java:745)
> Driver stacktrace:
>       at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1427)
>       at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1415)
>       at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1414)
>       at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>       at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
>       at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1414)
>       at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:793)
>       at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:793)
>       at scala.Option.foreach(Option.scala:236)
>       at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:793)
>       at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1639)
>       at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1598)
>       at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1587)
>       at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
>       at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:614)
>       at org.apache.spark.SparkContext.runJob(SparkContext.scala:1830)
>       at org.apache.spark.SparkContext.runJob(SparkContext.scala:1843)
>       at org.apache.spark.SparkContext.runJob(SparkContext.scala:1856)
>       at org.apache.spark.api.python.PythonRDD$.runJob(PythonRDD.scala:393)
>       at org.apache.spark.api.python.PythonRDD.runJob(PythonRDD.scala)
>       at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>       at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
>       at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:606)
>       at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
>       at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:379)
>       at py4j.Gateway.invoke(Gateway.java:259)
>       at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)
>       at py4j.commands.CallCommand.execute(CallCommand.java:79)
>       at py4j.GatewayConnection.run(GatewayConnection.java:207)
>       at java.lang.Thread.run(Thread.java:745)
> Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/pyspark.zip/pyspark/worker.py", line 111, in main
>     process()
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/pyspark.zip/pyspark/worker.py", line 106, in process
>     serializer.dump_stream(func(split_index, iterator), outfile)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/lib/pyspark.zip/pyspark/serializers.py", line 263, in dump_stream
>     vs = list(itertools.islice(iterator, batch))
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/rdd.py", line 1293, in takeUpToNumLeft
>     yield next(iterator)
>   File "/home/jenkins/workspace/SparkPullRequestBuilder@2/python/pyspark/tests.py", line 388, in func
>     from userlibrary import UserClass
> ImportError: cannot import name UserClass
>       at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)
>       at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)
>       at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)
>       at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)
>       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
>       at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
>       at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
>       at org.apache.spark.scheduler.Task.run(Task.scala:88)
>       at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
>       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>       ... 1 more
> ----------------------------------------------------------------------
> {noformat}
> /cc [~davies] in case you have some ideas.


