[ https://issues.apache.org/jira/browse/PIG-5447?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17824093#comment-17824093 ]

Koji Noguchi commented on PIG-5447:
-----------------------------------

Full stack trace.

{noformat}
org.apache.pig.impl.logicalLayer.FrontendException: ERROR 1066: Unable to open iterator for alias C. Backend error : Job aborted.
    at org.apache.pig.PigServer.openIterator(PigServer.java:1014)
    at org.apache.pig.test.TestSkewedJoin.testSkewedJoinOuter(TestSkewedJoin.java:386)
Caused by: org.apache.spark.SparkException: Job aborted.
    at org.apache.spark.internal.io.SparkHadoopWriter$.write(SparkHadoopWriter.scala:100)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1.apply$mcV$sp(PairRDDFunctions.scala:1083)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1.apply(PairRDDFunctions.scala:1081)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1.apply(PairRDDFunctions.scala:1081)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
    at org.apache.spark.rdd.PairRDDFunctions.saveAsNewAPIHadoopDataset(PairRDDFunctions.scala:1081)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopFile$2.apply$mcV$sp(PairRDDFunctions.scala:1000)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopFile$2.apply(PairRDDFunctions.scala:991)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopFile$2.apply(PairRDDFunctions.scala:991)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
    at org.apache.spark.rdd.PairRDDFunctions.saveAsNewAPIHadoopFile(PairRDDFunctions.scala:991)
    at org.apache.pig.backend.hadoop.executionengine.spark.converter.StoreConverter.convert(StoreConverter.java:104)
    at org.apache.pig.backend.hadoop.executionengine.spark.converter.StoreConverter.convert(StoreConverter.java:57)
    at org.apache.pig.backend.hadoop.executionengine.spark.JobGraphBuilder.physicalToRDD(JobGraphBuilder.java:292)
    at org.apache.pig.backend.hadoop.executionengine.spark.JobGraphBuilder.sparkOperToRDD(JobGraphBuilder.java:182)
    at org.apache.pig.backend.hadoop.executionengine.spark.JobGraphBuilder.visitSparkOp(JobGraphBuilder.java:112)
    at org.apache.pig.backend.hadoop.executionengine.spark.plan.SparkOperator.visit(SparkOperator.java:140)
    at org.apache.pig.backend.hadoop.executionengine.spark.plan.SparkOperator.visit(SparkOperator.java:37)
    at org.apache.pig.impl.plan.DependencyOrderWalker.walk(DependencyOrderWalker.java:87)
    at org.apache.pig.impl.plan.PlanVisitor.visit(PlanVisitor.java:46)
    at org.apache.pig.backend.hadoop.executionengine.spark.SparkLauncher.launchPig(SparkLauncher.java:241)
    at org.apache.pig.backend.hadoop.executionengine.HExecutionEngine.launchPig(HExecutionEngine.java:290)
    at org.apache.pig.PigServer.launchPlan(PigServer.java:1479)
    at org.apache.pig.PigServer.executeCompiledLogicalPlan(PigServer.java:1464)
    at org.apache.pig.PigServer.storeEx(PigServer.java:1123)
    at org.apache.pig.PigServer.store(PigServer.java:1086)
    at org.apache.pig.PigServer.openIterator(PigServer.java:999)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 94.0 failed 4 times, most recent failure: Lost task 1.3 in stage 94.0 (TID 436, gsrd238n19.red.ygrid.yahoo.com, executor 2): org.apache.spark.SparkException: Task failed while writing rows
    at org.apache.spark.internal.io.SparkHadoopWriter$.org$apache$spark$internal$io$SparkHadoopWriter$$executeTask(SparkHadoopWriter.scala:157)
    at org.apache.spark.internal.io.SparkHadoopWriter$$anonfun$3.apply(SparkHadoopWriter.scala:83)
    at org.apache.spark.internal.io.SparkHadoopWriter$$anonfun$3.apply(SparkHadoopWriter.scala:78)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:411)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:417)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.util.NoSuchElementException: next on empty iterator
    at scala.collection.Iterator$$anon$2.next(Iterator.scala:39)
    at scala.collection.Iterator$$anon$2.next(Iterator.scala:37)
    at scala.collection.Iterator$$anon$12.next(Iterator.scala:445)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
    at scala.collection.convert.Wrappers$IteratorWrapper.next(Wrappers.scala:31)
    at org.apache.pig.backend.hadoop.executionengine.spark.converter.IteratorTransform.next(IteratorTransform.java:37)
    at org.apache.pig.backend.hadoop.executionengine.spark.converter.SkewedJoinConverter$ToValueFunction$Tuple2TransformIterable$1.transform(SkewedJoinConverter.java:199)
    at org.apache.pig.backend.hadoop.executionengine.spark.converter.SkewedJoinConverter$ToValueFunction$Tuple2TransformIterable$1.transform(SkewedJoinConverter.java:176)
    ... (the preceding three frames repeat 20 more times) ...
    at org.apache.pig.backend.hadoop.executionengine.spark.converter.IteratorTransform.next(IteratorTransform.java:37)
    at scala.collection.convert.Wrappers$JIteratorWrapper.next(Wrappers.scala:43)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
    at org.apache.spark.internal.io.SparkHadoopWriter$$anonfun$4.apply(SparkHadoopWriter.scala:131)
    at org.apache.spark.internal.io.SparkHadoopWriter$$anonfun$4.apply(SparkHadoopWriter.scala:129)
    at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1394)
    at org.apache.spark.internal.io.SparkHadoopWriter$.org$apache$spark$internal$io$SparkHadoopWriter$$executeTask(SparkHadoopWriter.scala:141)

{noformat}
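
The innermost failure is Scala's Iterator.empty.next() ("next on empty iterator") surfacing through Pig's iterator-transform chain in SkewedJoinConverter. As a minimal sketch of that failure pattern, assuming a hypothetical wrapper (names made up, not the actual Pig source): a transforming iterator whose next() delegates straight to the wrapped iterator, so pulling from an exhausted stream, e.g. an empty side of a right-/full-outer skewed join, throws NoSuchElementException.

{code:java}
import java.util.Collections;
import java.util.Iterator;
import java.util.function.Function;

// Hypothetical sketch of the failure pattern in the trace above; not Pig code.
public class EmptyIteratorSketch {

    // A transforming iterator in the style of Pig's IteratorTransform:
    // next() delegates to the wrapped iterator with no hasNext() guard.
    static <I, O> Iterator<O> transform(Iterator<I> in, Function<I, O> f) {
        return new Iterator<O>() {
            @Override
            public boolean hasNext() {
                return in.hasNext();
            }

            @Override
            public O next() {
                // If 'in' is already exhausted, this throws
                // NoSuchElementException. (Scala's Iterator.empty adds the
                // "next on empty iterator" message seen in the trace; Java's
                // empty iterator throws the same exception without a message.)
                return f.apply(in.next());
            }
        };
    }

    public static void main(String[] args) {
        // Stand-in for an outer-join side that produced no rows.
        Iterator<String> out =
                transform(Collections.<Integer>emptyIterator(), Object::toString);

        out.next(); // throws java.util.NoSuchElementException
    }
}
{code}

A caller driving such a chain has to check hasNext() before every next(); the repeated SkewedJoinConverter transform frames in the trace suggest the outer-join value function keeps pulling from an already-exhausted iterator.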


> Pig-on-Spark TestSkewedJoin.testSkewedJoinOuter failing with NoSuchElementException
> -----------------------------------------------------------------------------------
>
>                 Key: PIG-5447
>                 URL: https://issues.apache.org/jira/browse/PIG-5447
>             Project: Pig
>          Issue Type: Bug
>            Reporter: Koji Noguchi
>            Assignee: Koji Noguchi
>            Priority: Major
>
> TestSkewedJoin.testSkewedJoinOuter is consistently failing for right-outer and full-outer joins.
> "Caused by: java.util.NoSuchElementException: next on empty iterator"



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
