anoopsjohn commented on a change in pull request #2646:
URL: https://github.com/apache/hadoop/pull/2646#discussion_r571847078



##########
File path: 
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPread.java
##########
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.azurebfs.services;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.ExecutionException;
+
+import org.junit.Test;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FutureDataInputStreamBuilder;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest;
+import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+
+public class ITestAbfsPread extends AbstractAbfsIntegrationTest {
+
+  // No-arg constructor required by JUnit. The throws clause is presumably
+  // needed because the AbstractAbfsIntegrationTest superclass constructor
+  // declares checked exceptions -- TODO confirm against the base class.
+  public ITestAbfsPread() throws Exception {
+  }
+
+  /**
+   * Verifies AbfsInputStream positional-read behavior: with the default
+   * configuration a pread fills the stream's internal buffer beyond the
+   * requested range, while with buffered pread disabled
+   * ({@code FS_AZURE_BUFFERED_PREAD_DISABLE}) only the requested bytes are
+   * fetched and the internal buffer is left untouched.
+   */
+  @Test
+  public void testPread() throws IOException {
+    describe("Testing preads in AbfsInputStream");
+    Path dest = path("ITestAbfsPread");
+
+    // Write a deterministic 100-byte dataset so every read below can be
+    // checked byte-for-byte against the source data.
+    int dataSize = 100;
+    byte[] data = ContractTestUtils.dataset(dataSize, 'a', 26);
+    ContractTestUtils.writeDataset(getFileSystem(), dest, data, data.length,
+        dataSize, true);
+    int bytesToRead = 10;
+
+    // Default open: buffered pread is enabled.
+    try (FSDataInputStream inputStream = getFileSystem().open(dest)) {
+      assertTrue(
+          "unexpected stream type "
+              + inputStream.getWrappedStream().getClass().getSimpleName(),
+          inputStream.getWrappedStream() instanceof AbfsInputStream);
+      byte[] readBuffer = new byte[bytesToRead];
+      int pos = 0;
+      assertEquals(
+          "AbfsInputStream#read did not read the correct number of bytes",
+          bytesToRead, inputStream.read(pos, readBuffer, 0, bytesToRead));
+      assertArrayEquals("AbfsInputStream#read did not read the correct bytes",
+          Arrays.copyOfRange(data, pos, pos + bytesToRead), readBuffer);
+      // Only 10 bytes were requested from offset 0, but by default the
+      // positional read does a seek-and-read that pulls the entire 100 bytes
+      // into the AbfsInputStream buffer.
+      assertArrayEquals(
+          "AbfsInputStream#read did not read more data into its buffer", data,
+          Arrays.copyOfRange(
+              ((AbfsInputStream) inputStream.getWrappedStream()).getBuffer(),
+              0, dataSize));
+    }
+
+    // Re-open with buffered pread disabled: positional reads must fetch
+    // exactly the requested range.
+    try (FSDataInputStream inputStream = openWithBufferedPreadDisabled(dest)) {
+      assertNotNull(inputStream);
+      AbfsInputStream abfsIs = (AbfsInputStream) inputStream.getWrappedStream();
+      byte[] readBuffer = new byte[bytesToRead];
+      int pos = 10;
+      assertEquals(
+          "AbfsInputStream#read did not read the correct number of bytes",
+          bytesToRead, inputStream.read(pos, readBuffer, 0, bytesToRead));
+      assertArrayEquals("AbfsInputStream#read did not read the correct bytes",
+          Arrays.copyOfRange(data, pos, pos + bytesToRead), readBuffer);
+      // Read only 10 bytes from offset 10. This time, as buffered pread is
+      // disabled, it will only read the exact bytes as requested and no data
+      // will get read into the AbfsInputStream#buffer. In fact the buffer
+      // won't even get initialized.
+      assertNull("AbfsInputStream pread caused the internal buffer creation",
+          abfsIs.getBuffer());
+      // Now make a seek and read so that the internal buffer gets created.
+      inputStream.seek(0);
+      inputStream.read(readBuffer);
+      // This read would have fetched all 100 bytes into the internal buffer.
+      assertArrayEquals(
+          "AbfsInputStream#read did not read more data into its buffer", data,
+          Arrays.copyOfRange(abfsIs.getBuffer(), 0, dataSize));
+      // Do a positional read again and make sure no extra data is fetched
+      // into the (reset) buffer.
+      resetBuffer(abfsIs.getBuffer());
+      pos = 0;
+      assertEquals(
+          "AbfsInputStream#read did not read the correct number of bytes",
+          bytesToRead, inputStream.read(pos, readBuffer, 0, bytesToRead));
+      assertArrayEquals("AbfsInputStream#read did not read the correct bytes",
+          Arrays.copyOfRange(data, pos, pos + bytesToRead), readBuffer);
+      assertFalse(
+          "AbfsInputStream#read read more data into its buffer than expected",
+          Arrays.equals(data,
+              Arrays.copyOfRange(abfsIs.getBuffer(), 0, dataSize)));
+    }
+  }
+
+  /**
+   * Opens {@code dest} via openFile() with
+   * {@code FS_AZURE_BUFFERED_PREAD_DISABLE} set to {@code true}.
+   *
+   * @param dest file to open.
+   * @return the opened stream.
+   * @throws IOException if the open fails, is interrupted, or the option is
+   *     rejected.
+   */
+  private FSDataInputStream openWithBufferedPreadDisabled(Path dest)
+      throws IOException {
+    FutureDataInputStreamBuilder builder = getFileSystem().openFile(dest);
+    builder.opt(ConfigurationKeys.FS_AZURE_BUFFERED_PREAD_DISABLE, true);
+    try {
+      return builder.build().get();
+    } catch (InterruptedException e) {
+      // Restore the interrupt status before surfacing as an IOException.
+      Thread.currentThread().interrupt();
+      throw new IOException(e);
+    } catch (IllegalArgumentException | UnsupportedOperationException
+        | ExecutionException e) {
+      throw new IOException(e);
+    }
+  }
+

Review comment:
       Done




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-issues-h...@hadoop.apache.org

Reply via email to