This is an automated email from the ASF dual-hosted git repository.

rakeshr pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 51cb858cc8c HADOOP-19208: [ABFS] Fixing logic to determine HNS nature of account to avoid extra getAcl() calls (#6893)
51cb858cc8c is described below

commit 51cb858cc8c23d873d4adfc21de5f2c1c22d346f
Author: Anuj Modi <128447756+anujmodi2...@users.noreply.github.com>
AuthorDate: Mon Jul 15 21:51:54 2024 +0530

    HADOOP-19208: [ABFS] Fixing logic to determine HNS nature of account to avoid extra getAcl() calls (#6893)
---
 .../fs/azurebfs/AzureBlobFileSystemStore.java      | 13 ++++--
 .../ITestAzureBlobFileSystemInitAndCreate.java     |  2 +
 .../fs/azurebfs/ITestGetNameSpaceEnabled.java      | 48 ++++++++++++++++++++++
 3 files changed, 60 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index ac564f082c9..449b123d921 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -395,14 +395,21 @@ public class AzureBlobFileSystemStore implements Closeable, ListingSupport {
     try {
       LOG.debug("Get root ACL status");
       getClient().getAclStatus(AbfsHttpConstants.ROOT_PATH, tracingContext);
+      // If getAcl succeeds, namespace is enabled.
       isNamespaceEnabled = Trilean.getTrilean(true);
     } catch (AbfsRestOperationException ex) {
-      // Get ACL status is a HEAD request, its response doesn't contain
-      // errorCode
-      // So can only rely on its status code to determine its account type.
+      // Get ACL status is a HEAD request, its response doesn't contain errorCode
+      // So can only rely on its status code to determine account type.
       if (HttpURLConnection.HTTP_BAD_REQUEST != ex.getStatusCode()) {
+        // If getAcl fails with anything other than 400, namespace is enabled.
+        isNamespaceEnabled = Trilean.getTrilean(true);
+        // Continue to throw exception as earlier.
+        LOG.debug("Failed to get ACL status with non 400. Inferring namespace enabled", ex);
         throw ex;
       }
+      // If getAcl fails with 400, namespace is disabled.
+      LOG.debug("Failed to get ACL status with 400. "
+          + "Inferring namespace disabled and ignoring error", ex);
       isNamespaceEnabled = Trilean.getTrilean(false);
     } catch (AzureBlobFileSystemException ex) {
       throw ex;
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java
index dcd73cc3e98..1ff3458fdba 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java
@@ -28,6 +28,7 @@ import org.mockito.Mockito;
 
 import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException;
+import org.apache.hadoop.fs.azurebfs.enums.Trilean;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
@@ -67,6 +68,7 @@ public class ITestAzureBlobFileSystemInitAndCreate extends
     Mockito.doThrow(TrileanConversionException.class)
         .when(store)
         .isNamespaceEnabled();
+    store.setNamespaceEnabled(Trilean.UNKNOWN);
 
     TracingContext tracingContext = getSampleTracingContext(fs, true);
     Mockito.doReturn(Mockito.mock(AbfsRestOperation.class))
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java
index b40e317d2e3..d168ed38844 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java
@@ -24,9 +24,11 @@ import java.util.UUID;
 import org.junit.Assume;
 import org.junit.Test;
 import org.assertj.core.api.Assertions;
+import org.mockito.Mockito;
 
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
 import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
 import org.apache.hadoop.conf.Configuration;
@@ -34,9 +36,14 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azurebfs.enums.Trilean;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
 
+import static java.net.HttpURLConnection.HTTP_BAD_REQUEST;
+import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR;
+import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
+import static java.net.HttpURLConnection.HTTP_UNAVAILABLE;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
@@ -217,4 +224,45 @@ public class ITestGetNameSpaceEnabled extends AbstractAbfsIntegrationTest {
     return mockClient;
   }
 
+  @Test
+  public void ensureGetAclDetermineHnsStatusAccurately() throws Exception {
+    ensureGetAclDetermineHnsStatusAccuratelyInternal(HTTP_BAD_REQUEST,
+        false, false);
+    ensureGetAclDetermineHnsStatusAccuratelyInternal(HTTP_NOT_FOUND,
+        true, true);
+    ensureGetAclDetermineHnsStatusAccuratelyInternal(HTTP_INTERNAL_ERROR,
+        true, true);
+    ensureGetAclDetermineHnsStatusAccuratelyInternal(HTTP_UNAVAILABLE,
+        true, true);
+  }
+
+  private void ensureGetAclDetermineHnsStatusAccuratelyInternal(int statusCode,
+      boolean expectedValue, boolean isExceptionExpected) throws Exception {
+    AzureBlobFileSystemStore store = Mockito.spy(getFileSystem().getAbfsStore());
+    AbfsClient mockClient = mock(AbfsClient.class);
+    store.setNamespaceEnabled(Trilean.UNKNOWN);
+    doReturn(mockClient).when(store).getClient();
+    AbfsRestOperationException ex = new AbfsRestOperationException(
+        statusCode, null, Integer.toString(statusCode), null);
+    doThrow(ex).when(mockClient).getAclStatus(anyString(), any(TracingContext.class));
+
+    if (isExceptionExpected) {
+      try {
+        store.getIsNamespaceEnabled(getTestTracingContext(getFileSystem(), false));
+        Assertions.fail(
+            "Exception Should have been thrown with status code: " + statusCode);
+      } catch (AbfsRestOperationException caughtEx) {
+        Assertions.assertThat(caughtEx.getStatusCode()).isEqualTo(statusCode);
+        Assertions.assertThat(caughtEx.getErrorMessage()).isEqualTo(ex.getErrorMessage());
+      }
+    }
+    // This should not trigger extra getAcl() call in case of exceptions.
+    boolean isHnsEnabled = store.getIsNamespaceEnabled(
+        getTestTracingContext(getFileSystem(), false));
+    Assertions.assertThat(isHnsEnabled).isEqualTo(expectedValue);
+
+    // GetAcl() should be called only once to determine the HNS status.
+    Mockito.verify(mockClient, times(1))
+        .getAclStatus(anyString(), any(TracingContext.class));
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to