[ https://issues.apache.org/jira/browse/SPARK-3937?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14173025#comment-14173025 ]

Josh Rosen commented on SPARK-3937:
-----------------------------------

Another occurrence of this problem, running a recent-ish version of master (1.2):

{code}
java.lang.InternalError: a fault occurred in a recent unsafe memory access operation in compiled Java code
        at org.xerial.snappy.SnappyNative.uncompressedLength(Native Method)
        at org.xerial.snappy.Snappy.uncompressedLength(Snappy.java:594)
        at org.xerial.snappy.SnappyInputStream.hasNextChunk(SnappyInputStream.java:351)
        at org.xerial.snappy.SnappyInputStream.read(SnappyInputStream.java:384)
        at java.io.ObjectInputStream$PeekInputStream.peek(ObjectInputStream.java:2293)
        at java.io.ObjectInputStream$BlockDataInputStream.peek(ObjectInputStream.java:2586)
        at java.io.ObjectInputStream$BlockDataInputStream.peekByte(ObjectInputStream.java:2596)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1318)
        at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:1990)
        at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1915)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1798)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1350)
        at java.io.ObjectInputStream.readObject(ObjectInputStream.java:370)
        at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:62)
        at org.apache.spark.serializer.DeserializationStream$$anon$1.getNext(Serializer.scala:133)
        at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:71)
        at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:350)
        at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$7.apply(Aggregate.scala:156)
        at org.apache.spark.sql.execution.Aggregate$$anonfun$execute$1$$anonfun$7.apply(Aggregate.scala:151)
        at org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:599)
        at org.apache.spark.rdd.RDD$$anonfun$13.apply(RDD.scala:599)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
        at org.apache.spark.scheduler.Task.run(Task.scala:56)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:182)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
{code}
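
To make the failing path concrete, here is a minimal, self-contained sketch of the same stream stack the trace goes through: Java deserialization over snappy-java's SnappyInputStream. The truncation below is only an illustrative way to corrupt the compressed stream and is not the confirmed trigger for the native fault in this ticket; the class name SnappyReadPath is invented for the example.

{code}
// Sketch of the read path in the trace above: ObjectInputStream -> SnappyInputStream.
// Assumes snappy-java is on the classpath. The truncation is a hypothetical trigger;
// the InternalError in this ticket was raised inside compiled native code and may
// have a different root cause.
import org.xerial.snappy.SnappyInputStream;
import org.xerial.snappy.SnappyOutputStream;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Arrays;

public class SnappyReadPath {
    public static void main(String[] args) throws Exception {
        // Write an object through the same stream stack Spark uses for
        // Snappy-compressed, Java-serialized data.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(new SnappyOutputStream(buf))) {
            out.writeObject("shuffle payload");
        }
        byte[] bytes = buf.toByteArray();

        // Corrupt the compressed stream by dropping the last few bytes.
        byte[] truncated = Arrays.copyOf(bytes, bytes.length - 4);

        // Reading it back fails inside SnappyInputStream's chunk handling.
        // Here that surfaces as an IOException or SnappyError; in this ticket
        // it surfaced as an InternalError from the native uncompress call,
        // hence the broad catch.
        try (ObjectInputStream in = new ObjectInputStream(
                new SnappyInputStream(new ByteArrayInputStream(truncated)))) {
            System.out.println(in.readObject());
        } catch (Throwable t) {
            System.out.println("read failed: " + t);
        }
    }
}
{code}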

> Unsafe memory access inside of Snappy library
> ---------------------------------------------
>
>                 Key: SPARK-3937
>                 URL: https://issues.apache.org/jira/browse/SPARK-3937
>             Project: Spark
>          Issue Type: Bug
>          Components: Spark Core
>    Affects Versions: 1.2.0
>            Reporter: Patrick Wendell
>
> This was observed on master between Spark 1.1 and 1.2. Unfortunately I don't have much information about this other than the stack trace. However, it was concerning enough that I figured I should post it.
> {code}
> java.lang.InternalError: a fault occurred in a recent unsafe memory access operation in compiled Java code
>         org.xerial.snappy.SnappyNative.rawUncompress(Native Method)
>         org.xerial.snappy.Snappy.rawUncompress(Snappy.java:444)
>         org.xerial.snappy.Snappy.uncompress(Snappy.java:480)
>         org.xerial.snappy.SnappyInputStream.hasNextChunk(SnappyInputStream.java:355)
>         org.xerial.snappy.SnappyInputStream.rawRead(SnappyInputStream.java:159)
>         org.xerial.snappy.SnappyInputStream.read(SnappyInputStream.java:142)
>         java.io.ObjectInputStream$PeekInputStream.read(ObjectInputStream.java:2310)
>         java.io.ObjectInputStream$BlockDataInputStream.read(ObjectInputStream.java:2712)
>         java.io.ObjectInputStream$BlockDataInputStream.readFully(ObjectInputStream.java:2742)
>         java.io.ObjectInputStream.readArray(ObjectInputStream.java:1687)
>         java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1344)
>         java.io.ObjectInputStream.readArray(ObjectInputStream.java:1706)
>         java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1344)
>         java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:1990)
>         java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1915)
>         java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1798)
>         java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1350)
>         java.io.ObjectInputStream.readObject(ObjectInputStream.java:370)
>         org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:62)
>         org.apache.spark.serializer.DeserializationStream$$anon$1.getNext(Serializer.scala:133)
>         org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:71)
>         scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:350)
>         org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
>         scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
>         scala.collection.Iterator$$anon$14.hasNext(Iterator.scala:388)
>         scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
>         scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:308)
>         scala.collection.Iterator$class.foreach(Iterator.scala:727)
>         scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
>         scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
>         scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
>         scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
>         scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
>         scala.collection.AbstractIterator.to(Iterator.scala:1157)
>         scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
>         scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
>         scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
>         scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
>         org.apache.spark.sql.execution.Limit$$anonfun$4.apply(basicOperators.scala:140)
>         org.apache.spark.sql.execution.Limit$$anonfun$4.apply(basicOperators.scala:140)
>         org.apache.spark.SparkContext$$anonfun$runJob$3.apply(SparkContext.scala:1118)
>         org.apache.spark.SparkContext$$anonfun$runJob$3.apply(SparkContext.scala:1118)
>         org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
>         org.apache.spark.scheduler.Task.run(Task.scala:56)
>         org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:182)
>         java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>         java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>         java.lang.Thread.run(Thread.java:745)
> {code}


