[ 
https://issues.apache.org/jira/browse/HADOOP-18501?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17629735#comment-17629735
 ] 

ASF GitHub Bot commented on HADOOP-18501:
-----------------------------------------

anmolanmol1234 commented on code in PR #5109:
URL: https://github.com/apache/hadoop/pull/5109#discussion_r1015231362


##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestPartialRead.java:
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.SocketException;
+import java.util.Random;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClientThrottlingIntercept;
+import 
org.apache.hadoop.fs.azurebfs.services.AbfsClientThrottlingInterceptTestUtil;
+import org.apache.hadoop.fs.azurebfs.services.MockAbfsClient;
+import org.apache.hadoop.fs.azurebfs.services.MockAbfsClientThrottlingAnalyzer;
+import org.apache.hadoop.fs.azurebfs.services.MockHttpOperation;
+import org.apache.hadoop.fs.azurebfs.services.MockHttpOperationTestIntercept;
+import 
org.apache.hadoop.fs.azurebfs.services.MockHttpOperationTestInterceptResult;
+
+import static java.net.HttpURLConnection.HTTP_PARTIAL;
+import static 
org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.CONNECTION_RESET;
+import static 
org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB;
+
+public class ITestPartialRead extends AbstractAbfsIntegrationTest {
+
+  private static final String TEST_PATH = "/testfile";
+
+  private Logger LOG =
+      LoggerFactory.getLogger(ITestPartialRead.class);
+
+  public ITestPartialRead() throws Exception {
+  }
+
+
+  /**
+   * Test1: Execute read for 4 MB, but httpOperation will read for only 1MB.:
+   * retry with the remaining data, add data in throttlingIntercept.
+   * Test2: Execute read for 4 MB, but httpOperation will throw a
+   * connection-reset exception after reading 1 MB:
+   * retry with remaining data + add data in throttlingIntercept.
+   * */
+
+
+  /**
+   * Creates a test file of {@code fileSize} random bytes at {@code testPath}
+   * and tunes the filesystem buffers so a single read spans the whole file.
+   *
+   * @param testPath path of the test file to create.
+   * @param fileSize number of random bytes to write.
+   * @return the bytes written, kept for later content verification.
+   * @throws IOException on any failure talking to the store.
+   */
+  private byte[] setup(final Path testPath, final int fileSize)
+      throws IOException {
+    final AzureBlobFileSystem fs = getFileSystem();
+    final AbfsConfiguration abfsConfiguration = fs.getAbfsStore()
+        .getAbfsConfiguration();
+    // 4 MB buffers with readahead disabled: a read of the 4 MB test file is
+    // issued as one server request, which the mock can then truncate.
+    final int bufferSize = 4 * ONE_MB;
+    abfsConfiguration.setWriteBufferSize(bufferSize);
+    abfsConfiguration.setReadBufferSize(bufferSize);
+    abfsConfiguration.setReadAheadQueueDepth(0);
+
+    final byte[] b = new byte[fileSize];
+    new Random().nextBytes(b);
+
+    // try-with-resources guarantees the stream is closed even if write()
+    // throws, replacing the manual try/finally of the original.
+    try (FSDataOutputStream stream = fs.create(testPath)) {
+      stream.write(b);
+    }
+    return b;
+  }
+
+  /**
+   * Verifies that a read which the server repeatedly satisfies only
+   * partially (1 MB of each 4 MB request) is retried for the remaining
+   * bytes and that every partial response is recorded as a failed
+   * instance in the read throttling analyzer.
+   */
+  @Test
+  public void testRecoverPartialRead() throws Exception {
+    int fileSize = 4 * ONE_MB;
+    Path testPath = path(TEST_PATH);
+    byte[] originalFile = setup(testPath, fileSize);
+
+    final AzureBlobFileSystem fs = getFileSystem();
+    // Wrap the real client so individual HTTP operations can be intercepted.
+    MockAbfsClient abfsClient = new MockAbfsClient(fs.getAbfsClient());
+
+    ActualServerReadByte actualServerReadByte = new ActualServerReadByte(
+        fileSize, originalFile);
+    // NOTE(review): getCallCount() is invoked below through the interface
+    // type, so MockHttpOperationTestIntercept presumably declares it —
+    // confirm, otherwise this will not compile.
+    MockHttpOperationTestIntercept mockHttpOperationTestIntercept
+        = new MockHttpOperationTestIntercept() {
+      // Number of server round-trips observed; expected to reach 4.
+      private int callCount = 0;
+
+      @Override
+      public MockHttpOperationTestInterceptResult intercept(final MockHttpOperation mockHttpOperation,
+          final byte[] buffer,
+          final int offset,
+          final int length) throws IOException {
+        /*
+         * 1. Check if server can handle the request parameters.
+         * 2. return 1MB data to test-client.
+         */
+        callActualServerAndAssertBehaviour(mockHttpOperation, buffer, offset,
+            length, actualServerReadByte, ONE_MB);
+
+        // Report HTTP 206 (Partial Content) with only 1 MB delivered so the
+        // client is forced to retry for the remaining bytes.
+        MockHttpOperationTestInterceptResult
+            mockHttpOperationTestInterceptResult
+            = new MockHttpOperationTestInterceptResult();
+        mockHttpOperationTestInterceptResult.setStatus(HTTP_PARTIAL);
+        mockHttpOperationTestInterceptResult.setBytesRead(ONE_MB);
+        callCount++;
+        return mockHttpOperationTestInterceptResult;
+      }
+
+      public int getCallCount() {
+        return callCount;
+      }
+    };
+    abfsClient.setMockHttpOperationTestIntercept(
+        mockHttpOperationTestIntercept);
+    fs.getAbfsStore().setClient(abfsClient);
+
+    // Swap the shared intercept's read analyzer for a mock so the number of
+    // throttling events recorded during the read can be asserted.
+    AbfsClientThrottlingIntercept intercept
+        = AbfsClientThrottlingInterceptTestUtil.get();
+    MockAbfsClientThrottlingAnalyzer readAnalyzer
+        = new MockAbfsClientThrottlingAnalyzer("read");
+    MockAbfsClientThrottlingAnalyzer analyzerToBeAsserted
+        = (MockAbfsClientThrottlingAnalyzer) AbfsClientThrottlingInterceptTestUtil.setReadAnalyzer(
+        intercept, readAnalyzer);
+
+    // NOTE(review): the return value of read() is ignored; with the mocked
+    // partial responses the stream is expected to retry internally until all
+    // fileSize bytes arrive — confirm, or assert the returned byte count.
+    FSDataInputStream inputStream = fs.open(testPath);
+    byte[] buffer = new byte[fileSize];
+    inputStream.read(0, buffer, 0, fileSize);
+
+    // 4 MB served 1 MB at a time => 4 server calls, each of which must have
+    // been accounted as a throttling "failed instance".
+    Assert.assertEquals(4, mockHttpOperationTestIntercept.getCallCount());
+    Assert.assertEquals(4,
+        analyzerToBeAsserted.getFailedInstances().intValue());
+  }
+
+  private void callActualServerAndAssertBehaviour(final MockHttpOperation 
mockHttpOperation,
+      final byte[] buffer,
+      final int offset,
+      final int length,
+      final ActualServerReadByte actualServerReadByte,
+      final int byteLenMockServerReturn) throws IOException {
+    LOG.info("length: " + length + "; offset: " + offset);
+    mockHttpOperation.processResponseSuperCall(buffer, offset, length);
+    Assert.assertTrue(

Review Comment:
   Consider statically importing the JUnit `Assert` methods (e.g. `import static org.junit.Assert.assertEquals;`) so each call does not need the `Assert.` qualifier.





> [ABFS]: Partial Read should add to throttling metric
> ----------------------------------------------------
>
>                 Key: HADOOP-18501
>                 URL: https://issues.apache.org/jira/browse/HADOOP-18501
>             Project: Hadoop Common
>          Issue Type: Bug
>          Components: fs/azure
>    Affects Versions: 3.3.4
>            Reporter: Pranav Saxena
>            Assignee: Pranav Saxena
>            Priority: Minor
>              Labels: pull-request-available
>
> Error Description:
> For a partial read (caused by account backend throttling), the ABFS driver 
> retries but does not add the event to the throttling metrics.
> In the case of a partial read with a connection-reset exception, the ABFS 
> driver retries the full request and does not add the event to the throttling 
> metrics.
> Mitigation:
> In the case of a partial read, the ABFS driver should retry only for the 
> remaining bytes, and the event should be added to the throttling metrics.



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-issues-h...@hadoop.apache.org

Reply via email to