sahilTakiar commented on a change in pull request #597: HDFS-3246: pRead equivalent for direct read path
URL: https://github.com/apache/hadoop/pull/597#discussion_r272775362
########## File path: hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java ##########
@@ -336,22 +338,50 @@ public int read(long position, byte[] buffer, int offset, int length)
       }
       return n;
-    } catch (ClassCastException e) {
+    } else {
       throw new UnsupportedOperationException("This stream does not support " +
           "positioned read.");
     }
   }
+
+  /**
+   * Positioned read using {@link ByteBuffer}s. This method is thread-safe.
+   */
+  @Override
+  public int read(long position, final ByteBuffer buf)
+      throws IOException {
+    checkStream();
+    if (in instanceof ByteBufferPositionedReadable) {
+      int bufPos = buf.position();
+      final int n = ((ByteBufferPositionedReadable) in).read(position, buf);
+      if (n > 0) {
+        // This operation does not change the current offset of the file
+        decrypt(position, buf, n, bufPos);
+      }
+
+      return n;
+    } else {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "positioned reads with byte buffers.");
+    }
+  }
 
   /**
    * Decrypt length bytes in buffer starting at offset. Output is also put
    * into buffer starting at offset. It is thread-safe.
    */
   private void decrypt(long position, byte[] buffer, int offset, int length)
       throws IOException {
-    ByteBuffer inBuffer = getBuffer();
-    ByteBuffer outBuffer = getBuffer();
+    ByteBuffer inBuffer = null;
+    ByteBuffer outBuffer = null;
     Decryptor decryptor = null;
     try {
+      // TODO we should be able to avoid copying chunks between the input buf,

Review comment:
   Removed the TODO and filed HDFS-14417
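[Editor's note] For readers following along, below is a minimal, hypothetical caller-side sketch of the positioned ByteBuffer read this patch enables. It assumes the patch (and its plumbing through FSDataInputStream) is in place, that the opened stream's underlying implementation supports ByteBufferPositionedReadable, and that "/enc/zone/file" is a placeholder path; it is an illustration, not code from the PR.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreadByteBufferSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder path: a file inside an HDFS encryption zone, so the
    // returned stream wraps a CryptoInputStream.
    try (FSDataInputStream in = fs.open(new Path("/enc/zone/file"))) {
      ByteBuffer buf = ByteBuffer.allocateDirect(4096);
      // Positioned read: fetch (and transparently decrypt) up to 4096 bytes
      // starting at offset 1024 without moving the stream's current offset.
      // Per the patch above, this throws UnsupportedOperationException if
      // the wrapped stream does not implement ByteBufferPositionedReadable.
      int n = in.read(1024L, buf);
      buf.flip();
      System.out.println("Read " + n + " bytes at position 1024");
    }
  }
}

Note the design mirrored from the patch: because the read is positioned, decryption must compute the IV from the supplied position rather than the stream's current offset, and the stream's own position is left untouched, which is what makes the call safe to issue from multiple threads.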