Github user vanzin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/17295#discussion_r106691384
  
    --- Diff: core/src/main/scala/org/apache/spark/storage/DiskStore.scala ---
    @@ -73,55 +86,219 @@ private[spark] class DiskStore(conf: SparkConf, diskManager: DiskBlockManager) e
       }
     
       def putBytes(blockId: BlockId, bytes: ChunkedByteBuffer): Unit = {
    -    put(blockId) { fileOutputStream =>
    -      val channel = fileOutputStream.getChannel
    -      Utils.tryWithSafeFinally {
    -        bytes.writeFully(channel)
    -      } {
    -        channel.close()
    -      }
    +    put(blockId) { channel =>
    +      bytes.writeFully(channel)
         }
       }
     
    -  def getBytes(blockId: BlockId): ChunkedByteBuffer = {
    +  def getBytes(blockId: BlockId): BlockData = {
         val file = diskManager.getFile(blockId.name)
    -    val channel = new RandomAccessFile(file, "r").getChannel
    -    Utils.tryWithSafeFinally {
    -      // For small files, directly read rather than memory map
    -      if (file.length < minMemoryMapBytes) {
    -        val buf = ByteBuffer.allocate(file.length.toInt)
    -        channel.position(0)
    -        while (buf.remaining() != 0) {
    -          if (channel.read(buf) == -1) {
    -            throw new IOException("Reached EOF before filling buffer\n" +
    -              s"offset=0\nfile=${file.getAbsolutePath}\nbuf.remaining=${buf.remaining}")
    +    val blockSize = getSize(blockId)
    +
    +    securityManager.getIOEncryptionKey() match {
    +      case Some(key) =>
    +        // Encrypted blocks cannot be memory mapped; return a special object that does decryption
    +        // and provides InputStream / FileRegion implementations for reading the data.
    +        new EncryptedBlockData(file, blockSize, conf, key)
    +
    +      case _ =>
    +        val channel = new FileInputStream(file).getChannel()
    +        if (blockSize < minMemoryMapBytes) {
    +          // For small files, directly read rather than memory map.
    +          Utils.tryWithSafeFinally {
    +            val buf = ByteBuffer.allocate(blockSize.toInt)
    +            while (buf.remaining() > 0) {
    +              if (channel.read(buf) == -1) {
    +                throw new IOException("Reached EOF before filling buffer\n" +
    +                  s"file=${file.getAbsolutePath}\nbuf.remaining=${buf.remaining}")
    +              }
    +            }
    +            buf.flip()
    +            new ByteBufferBlockData(new ChunkedByteBuffer(buf))
    +          } {
    +            channel.close()
    +          }
    +        } else {
    +          Utils.tryWithSafeFinally {
    +            new ByteBufferBlockData(
    +              new ChunkedByteBuffer(channel.map(MapMode.READ_ONLY, 0, file.length)))
    +          } {
    +            channel.close()
               }
             }
    -        buf.flip()
    -        new ChunkedByteBuffer(buf)
    -      } else {
    -        new ChunkedByteBuffer(channel.map(MapMode.READ_ONLY, 0, file.length))
    -      }
    -    } {
    -      channel.close()
         }
       }
     
       def remove(blockId: BlockId): Boolean = {
         val file = diskManager.getFile(blockId.name)
    -    if (file.exists()) {
    -      val ret = file.delete()
    -      if (!ret) {
    -        logWarning(s"Error deleting ${file.getPath()}")
    +    val meta = diskManager.getMetadataFile(blockId)
    +
    +    def delete(f: File): Boolean = {
    +      if (f.exists()) {
    +        val ret = f.delete()
    +        if (!ret) {
    +          logWarning(s"Error deleting ${f.getPath()}")
    +        }
    +
    +        ret
    +      } else {
    +        false
           }
    -      ret
    -    } else {
    -      false
         }
    +
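    +    // Use '&' rather than '&&' so both deletes are attempted even if the first one fails.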
    +    delete(file) & delete(meta)
       }
     
       def contains(blockId: BlockId): Boolean = {
         val file = diskManager.getFile(blockId.name)
         file.exists()
       }
    +
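    +  /** Opens the given file for writing, encrypting the channel when an IO encryption key is set. */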
    +  private def openForWrite(file: File): WritableByteChannel = {
    +    val out = new FileOutputStream(file).getChannel()
    +    try {
    +      securityManager.getIOEncryptionKey().map { key =>
    +        CryptoStreamUtils.createWritableChannel(out, conf, key)
    +      }.getOrElse(out)
    +    } catch {
    +      case e: Exception =>
    +        out.close()
    +        throw e
    +    }
    +  }
    +
    +}
    +
    +private class EncryptedBlockData(
    +    file: File,
    +    blockSize: Long,
    +    conf: SparkConf,
    +    key: Array[Byte]) extends BlockData {
    +
    +  override def toInputStream(): InputStream = Channels.newInputStream(open())
    +
    +  override def toManagedBuffer(): ManagedBuffer = new EncryptedManagedBuffer()
    +
    +  override def toByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer = {
    +    val source = open()
    +    try {
    +      var remaining = blockSize
    +      val chunks = new ListBuffer[ByteBuffer]()
    +      while (remaining > 0) {
    +        val chunkSize = math.min(remaining, Int.MaxValue)
    +        val chunk = allocator(chunkSize.toInt)
    +        remaining -= chunkSize
    +
    +        while (chunk.remaining() > 0) {
    +          if (source.read(chunk) == -1) {
    +            throw new IOException("Reached EOF before filling buffer")
    +          }
    +        }
    +        chunk.flip()
    +        chunks += chunk
    +      }
    +
    +      new ChunkedByteBuffer(chunks.toArray)
    +    } finally {
    +      source.close()
    +    }
    +  }
    +
    +  override def size: Long = blockSize
    +
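    +  // No resources to release: nothing is memory-mapped; data is decrypted from the file on demand.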
    +  override def dispose(): Unit = { }
    +
    +  private def open(): ReadableByteChannel = {
    +    val channel = new FileInputStream(file).getChannel()
    +    try {
    +      CryptoStreamUtils.createReadableChannel(channel, conf, key)
    +    } catch {
    +      case e: Exception =>
    +        Closeables.close(channel, true)
    +        throw e
    +    }
    +  }
    +
    +  private class EncryptedManagedBuffer extends ManagedBuffer {
    +
    +    override def size(): Long = blockSize
    +
    +    override def nioByteBuffer(): ByteBuffer = {
    +      // This is used by the block transfer service to replicate blocks. The upload code reads
    +      // all bytes into memory to send the block to the remote executor, so it's ok to do this
    +      // as long as the block fits in a Java array.
    +      assert(blockSize <= Int.MaxValue, "Block is too large to be wrapped in a byte buffer.")
    +      val is = toInputStream()
    +      try {
    +        ByteBuffer.wrap(ByteStreams.toByteArray(is))
    --- End diff --
    
    There's a comment explaining it a few lines above...
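
    For reference, a minimal standalone sketch of the pattern that comment describes
    (the helper name is illustrative, not part of this PR): decrypt the block through
    an InputStream and buffer it fully in memory, which is only safe while the block
    fits in a single Java array, hence the assert.

        import java.io.InputStream
        import java.nio.ByteBuffer

        import com.google.common.io.ByteStreams

        // Hypothetical helper mirroring EncryptedManagedBuffer.nioByteBuffer() above:
        // read the (decrypted) stream to EOF into a single array and wrap it.
        def readAllForTransfer(open: () => InputStream, blockSize: Long): ByteBuffer = {
          assert(blockSize <= Int.MaxValue, "Block is too large to be wrapped in a byte buffer.")
          val is = open()
          try {
            ByteBuffer.wrap(ByteStreams.toByteArray(is))
          } finally {
            is.close()
          }
        }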

