[hadoop] branch branch-3.2 updated: HDFS-16345. Fix test case fail in TestBlockStoragePolicy (#3696). Contributed by guophilipse.

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 038759e  HDFS-16345. Fix test case fail in TestBlockStoragePolicy  
(#3696). Contributed by guophilipse.
038759e is described below

commit 038759e444c6dac28ae4b5474b1a43165ad5cd41
Author: GuoPhilipse <46367746+guophili...@users.noreply.github.com>
AuthorDate: Wed Dec 8 19:04:54 2021 +0800

HDFS-16345. Fix test case fail in TestBlockStoragePolicy  (#3696). 
Contributed by guophilipse.

Reviewed-by: Akira Ajisaka 
Signed-off-by: Ayush Saxena 
(cherry picked from commit 25849ff7b7430692d1f40f59e9cb24690cbda216)
---
 .../apache/hadoop/hdfs/TestBlockStoragePolicy.java | 106 -
 1 file changed, 62 insertions(+), 44 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index ab1f1ab..511838c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -1237,23 +1237,29 @@ public class TestBlockStoragePolicy {
 DFSTestUtil.formatNameNode(conf);
 NameNode namenode = new NameNode(conf);
 
-final BlockManager bm = namenode.getNamesystem().getBlockManager();
-BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
-NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
-for (DatanodeDescriptor datanode : dataNodes) {
-  cluster.add(datanode);
-}
-
-DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
-dataNodes[0], Collections.emptyList(), false,
-new HashSet(), 0, policy1, null);
-System.out.println(Arrays.asList(targets));
-Assert.assertEquals(3, targets.length);
-targets = replicator.chooseTarget("/foo", 3,
-dataNodes[0], Collections.emptyList(), false,
-new HashSet(), 0, policy2, null);
-System.out.println(Arrays.asList(targets));
-Assert.assertEquals(3, targets.length);
+try {
+  final BlockManager bm = namenode.getNamesystem().getBlockManager();
+  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
+  for (DatanodeDescriptor datanode : dataNodes) {
+cluster.add(datanode);
+  }
+
+  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+  dataNodes[0], Collections.emptyList(), 
false,
+  new HashSet(), 0, policy1, null);
+  System.out.println(Arrays.asList(targets));
+  Assert.assertEquals(3, targets.length);
+  targets = replicator.chooseTarget("/foo", 3,
+  dataNodes[0], Collections.emptyList(), 
false,
+  new HashSet(), 0, policy2, null);
+  System.out.println(Arrays.asList(targets));
+  Assert.assertEquals(3, targets.length);
+} finally {
+  if (namenode != null) {
+namenode.stop();
+  }
+}
   }
 
   @Test
@@ -1284,20 +1290,26 @@ public class TestBlockStoragePolicy {
 DFSTestUtil.formatNameNode(conf);
 NameNode namenode = new NameNode(conf);
 
-final BlockManager bm = namenode.getNamesystem().getBlockManager();
-BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
-NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
-for (DatanodeDescriptor datanode : dataNodes) {
-  cluster.add(datanode);
-}
+try {
+  final BlockManager bm = namenode.getNamesystem().getBlockManager();
+  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
+  for (DatanodeDescriptor datanode : dataNodes) {
+cluster.add(datanode);
+  }
 
-DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
-dataNodes[0], Collections.emptyList(), false,
-new HashSet(), 0, policy, null);
-System.out.println(policy.getName() + ": " + Arrays.asList(targets));
-Assert.assertEquals(2, targets.length);
-Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
-Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+  dataNodes[0], Collections.emptyList(), 
false,
+  new HashSet(), 0, policy, null);
+  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
+  Assert.assertEquals(2, targets.length);
+  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
+  Assert.assertEquals(StorageType.DISK, 

[hadoop] branch branch-3.3 updated: HDFS-16345. Fix test case fail in TestBlockStoragePolicy (#3696). Contributed by guophilipse.

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 2720479  HDFS-16345. Fix test case fail in TestBlockStoragePolicy  
(#3696). Contributed by guophilipse.
2720479 is described below

commit 2720479988696499e08042bb28d340ad8e6115af
Author: GuoPhilipse <46367746+guophili...@users.noreply.github.com>
AuthorDate: Wed Dec 8 19:04:54 2021 +0800

HDFS-16345. Fix test case fail in TestBlockStoragePolicy  (#3696). 
Contributed by guophilipse.

Reviewed-by: Akira Ajisaka 
Signed-off-by: Ayush Saxena 
(cherry picked from commit 25849ff7b7430692d1f40f59e9cb24690cbda216)
---
 .../apache/hadoop/hdfs/TestBlockStoragePolicy.java | 106 -
 1 file changed, 62 insertions(+), 44 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 171ebb4..def5afa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -1237,23 +1237,29 @@ public class TestBlockStoragePolicy {
 DFSTestUtil.formatNameNode(conf);
 NameNode namenode = new NameNode(conf);
 
-final BlockManager bm = namenode.getNamesystem().getBlockManager();
-BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
-NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
-for (DatanodeDescriptor datanode : dataNodes) {
-  cluster.add(datanode);
-}
-
-DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
-dataNodes[0], Collections.emptyList(), false,
-new HashSet(), 0, policy1, null);
-System.out.println(Arrays.asList(targets));
-Assert.assertEquals(3, targets.length);
-targets = replicator.chooseTarget("/foo", 3,
-dataNodes[0], Collections.emptyList(), false,
-new HashSet(), 0, policy2, null);
-System.out.println(Arrays.asList(targets));
-Assert.assertEquals(3, targets.length);
+try {
+  final BlockManager bm = namenode.getNamesystem().getBlockManager();
+  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
+  for (DatanodeDescriptor datanode : dataNodes) {
+cluster.add(datanode);
+  }
+
+  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+  dataNodes[0], Collections.emptyList(), 
false,
+  new HashSet(), 0, policy1, null);
+  System.out.println(Arrays.asList(targets));
+  Assert.assertEquals(3, targets.length);
+  targets = replicator.chooseTarget("/foo", 3,
+  dataNodes[0], Collections.emptyList(), 
false,
+  new HashSet(), 0, policy2, null);
+  System.out.println(Arrays.asList(targets));
+  Assert.assertEquals(3, targets.length);
+} finally {
+  if (namenode != null) {
+namenode.stop();
+  }
+}
   }
 
   @Test
@@ -1284,20 +1290,26 @@ public class TestBlockStoragePolicy {
 DFSTestUtil.formatNameNode(conf);
 NameNode namenode = new NameNode(conf);
 
-final BlockManager bm = namenode.getNamesystem().getBlockManager();
-BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
-NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
-for (DatanodeDescriptor datanode : dataNodes) {
-  cluster.add(datanode);
-}
+try {
+  final BlockManager bm = namenode.getNamesystem().getBlockManager();
+  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
+  for (DatanodeDescriptor datanode : dataNodes) {
+cluster.add(datanode);
+  }
 
-DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
-dataNodes[0], Collections.emptyList(), false,
-new HashSet(), 0, policy, null);
-System.out.println(policy.getName() + ": " + Arrays.asList(targets));
-Assert.assertEquals(2, targets.length);
-Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
-Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+  dataNodes[0], Collections.emptyList(), 
false,
+  new HashSet(), 0, policy, null);
+  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
+  Assert.assertEquals(2, targets.length);
+  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
+  Assert.assertEquals(StorageType.DISK, 

[hadoop] branch branch-3.2 updated: HADOOP-16905. Update jackson-databind to 2.10.3 to relieve us from the endless CVE patches (#3748)

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new bc68741  HADOOP-16905. Update jackson-databind to 2.10.3 to relieve us 
from the endless CVE patches (#3748)
bc68741 is described below

commit bc6874139f534af81a83523cf10508d3d16a032f
Author: Akira Ajisaka 
AuthorDate: Fri Dec 10 16:24:06 2021 +0900

HADOOP-16905. Update jackson-databind to 2.10.3 to relieve us from the 
endless CVE patches (#3748)

(cherry picked from commit 69faaa1d58ad7de18a8dfa477531653a2c061568)

 Conflicts:
hadoop-project/pom.xml
---
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 7 +++
 hadoop-project/pom.xml  | 4 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 8e1d248..535f964 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -335,6 +335,13 @@
   
 
 
+  javax/xml/bind/
+  
${shaded.dependency.prefix}.javax.xml.bind.
+  
+**/pom.xml
+  
+
+
   net/
   
${shaded.dependency.prefix}.net.
   
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a173af8..4d4e166 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -71,8 +71,8 @@
 
 
 1.9.13
-2.9.10
-2.9.10.4
+2.10.3
+2.10.3
 
 
 4.5.13

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-17643 WASB : Make metadata checks case insensitive (#3103)

2021-12-09 Thread surendralilhore
This is an automated email from the ASF dual-hosted git repository.

surendralilhore pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 9a1c8d2  HADOOP-17643 WASB : Make metadata checks case insensitive 
(#3103)
9a1c8d2 is described below

commit 9a1c8d2f4171c66fb791c93f75ace64ad7547a71
Author: Anoop Sam John 
AuthorDate: Fri Dec 10 10:44:31 2021 +0530

HADOOP-17643 WASB : Make metadata checks case insensitive (#3103)
---
 .../fs/azure/AzureNativeFileSystemStore.java   | 76 +++---
 1 file changed, 52 insertions(+), 24 deletions(-)

diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 368283a..3912771 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -40,6 +40,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.Set;
 
@@ -181,6 +182,11 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
*/
   public static final String KEY_USE_LOCAL_SAS_KEY_MODE = 
"fs.azure.local.sas.key.mode";
 
+  /**
+   * Config to control case sensitive metadata key checks/retrieval. If this
+   * is false, blob metadata keys will be treated case insensitive.
+   */
+  private static final String KEY_BLOB_METADATA_KEY_CASE_SENSITIVE = 
"fs.azure.blob.metadata.key.case.sensitive";
   private static final String PERMISSION_METADATA_KEY = "hdi_permission";
   private static final String OLD_PERMISSION_METADATA_KEY = "asv_permission";
   private static final String IS_FOLDER_METADATA_KEY = "hdi_isfolder";
@@ -364,6 +370,8 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 
   private String delegationToken;
 
+  private boolean metadataKeyCaseSensitive;
+
   /** The error message template when container is not accessible. */
   public static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials 
found for "
   + "account %s in the configuration, and its container %s is not "
@@ -585,6 +593,12 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   LOG.warn("Unable to initialize HBase root as an atomic rename 
directory.");
 }
 LOG.debug("Atomic rename directories: {} ", setToString(atomicRenameDirs));
+metadataKeyCaseSensitive = conf
+.getBoolean(KEY_BLOB_METADATA_KEY_CASE_SENSITIVE, true);
+if (!metadataKeyCaseSensitive) {
+  LOG.info("{} configured as false. Blob metadata will be treated case 
insensitive.",
+  KEY_BLOB_METADATA_KEY_CASE_SENSITIVE);
+}
   }
 
   /**
@@ -1633,15 +1647,24 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 blob.setMetadata(metadata);
   }
 
-  private static String getMetadataAttribute(CloudBlobWrapper blob,
+  private String getMetadataAttribute(HashMap metadata,
   String... keyAlternatives) {
-HashMap metadata = blob.getMetadata();
 if (null == metadata) {
   return null;
 }
 for (String key : keyAlternatives) {
-  if (metadata.containsKey(key)) {
-return metadata.get(key);
+  if (metadataKeyCaseSensitive) {
+if (metadata.containsKey(key)) {
+  return metadata.get(key);
+}
+  } else {
+// See HADOOP-17643 for details on why this case insensitive metadata
+// checks been added
+for (Entry entry : metadata.entrySet()) {
+  if (key.equalsIgnoreCase(entry.getKey())) {
+return entry.getValue();
+  }
+}
   }
 }
 return null;
@@ -1665,7 +1688,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   }
 
   private PermissionStatus getPermissionStatus(CloudBlobWrapper blob) {
-String permissionMetadataValue = getMetadataAttribute(blob,
+String permissionMetadataValue = getMetadataAttribute(blob.getMetadata(),
 PERMISSION_METADATA_KEY, OLD_PERMISSION_METADATA_KEY);
 if (permissionMetadataValue != null) {
   return PermissionStatusJsonSerializer.fromJSONString(
@@ -1713,19 +1736,32 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
   }
 
-  private static String getLinkAttributeValue(CloudBlobWrapper blob)
+  private String getLinkAttributeValue(CloudBlobWrapper blob)
   throws UnsupportedEncodingException {
-String encodedLinkTarget = getMetadataAttribute(blob,
+String encodedLinkTarget = getMetadataAttribute(blob.getMetadata(),
 

[hadoop] branch trunk updated: YARN-10982. Replace all occurrences of queuePath with the new QueuePath class. Contributed by Tibor Kovacs

2021-12-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a0d8cde  YARN-10982. Replace all occurrences of queuePath with the new 
QueuePath class. Contributed by Tibor Kovacs
a0d8cde is described below

commit a0d8cde133a7d8719217567efaa4e66c583f790d
Author: Szilard Nemeth 
AuthorDate: Thu Dec 9 17:51:44 2021 +0100

YARN-10982. Replace all occurrences of queuePath with the new QueuePath 
class. Contributed by Tibor Kovacs
---
 .../scheduler/capacity/AbstractCSQueue.java| 25 +++---
 .../capacity/AbstractManagedParentQueue.java   |  2 +-
 .../capacity/AutoCreatedQueueTemplate.java | 14 ++--
 .../scheduler/capacity/CSQueue.java|  6 ++
 .../scheduler/capacity/CSQueueUtils.java   |  8 +-
 .../capacity/CapacitySchedulerConfiguration.java   | 57 +++---
 .../scheduler/capacity/ManagedParentQueue.java |  9 +--
 .../scheduler/capacity/ParentQueue.java| 22 +++---
 .../scheduler/capacity/QueuePath.java  | 23 ++
 .../TestAbsoluteResourceConfiguration.java | 90 +++---
 .../TestAbsoluteResourceWithAutoQueue.java | 18 ++---
 .../capacity/TestAutoCreatedQueueTemplate.java | 57 +++---
 .../capacity/TestCSAllocateCustomResource.java |  4 +-
 .../scheduler/capacity/TestCapacityScheduler.java  |  9 ++-
 .../TestCapacitySchedulerConfigValidator.java  | 35 +
 .../scheduler/capacity/TestParentQueue.java|  4 +-
 .../scheduler/capacity/TestQueuePath.java  | 10 +++
 .../TestRMWebServicesConfigurationMutation.java| 15 ++--
 18 files changed, 233 insertions(+), 175 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index efdfa8e..097a9df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -82,7 +82,6 @@ public abstract class AbstractCSQueue implements CSQueue {
   protected final QueueAllocationSettings queueAllocationSettings;
   volatile CSQueue parent;
   protected final QueuePath queuePath;
-  final String queueName;
   protected QueueNodeLabelsSettings queueNodeLabelsSettings;
   private volatile QueueAppLifetimeAndLimitSettings queueAppLifetimeSettings;
   private CSQueuePreemptionSettings preemptionSettings;
@@ -143,7 +142,6 @@ public abstract class AbstractCSQueue implements CSQueue {
 this.labelManager = cs.getRMContext().getNodeLabelManager();
 this.parent = parent;
 this.queuePath = createQueuePath(parent, queueName);
-this.queueName = queuePath.getLeafName();
 this.resourceCalculator = cs.getResourceCalculator();
 this.activitiesManager = cs.getActivitiesManager();
 
@@ -176,7 +174,7 @@ public abstract class AbstractCSQueue implements CSQueue {
 
   protected void setupConfigurableCapacities(
   CapacitySchedulerConfiguration configuration) {
-CSQueueUtils.loadCapacitiesByLabelsFromConf(getQueuePath(), 
queueCapacities,
+CSQueueUtils.loadCapacitiesByLabelsFromConf(queuePath, queueCapacities,
 configuration, this.queueNodeLabelsSettings.getConfiguredNodeLabels());
   }
 
@@ -186,6 +184,11 @@ public abstract class AbstractCSQueue implements CSQueue {
   }
 
   @Override
+  public QueuePath getQueuePathObject() {
+return this.queuePath;
+  }
+
+  @Override
   public float getCapacity() {
 return queueCapacities.getCapacity();
   }
@@ -241,7 +244,7 @@ public abstract class AbstractCSQueue implements CSQueue {
 
   @Override
   public String getQueueName() {
-return queueName;
+return this.queuePath.getLeafName();
   }
 
   @Override
@@ -279,11 +282,11 @@ public abstract class AbstractCSQueue implements CSQueue {
 writeLock.lock();
 try {
   // Sanity check
-  CSQueueUtils.checkMaxCapacity(getQueuePath(),
+  CSQueueUtils.checkMaxCapacity(this.queuePath,
   queueCapacities.getCapacity(), maximumCapacity);
   float absMaxCapacity = CSQueueUtils.computeAbsoluteMaximumCapacity(
   maximumCapacity, parent);
-  CSQueueUtils.checkAbsoluteCapacity(getQueuePath(),
+  CSQueueUtils.checkAbsoluteCapacity(this.queuePath,
   queueCapacities.getAbsoluteCapacity(), absMaxCapacity);
 
   

[hadoop] branch branch-2.10 updated: HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure (#3774)

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 7d88a54  HADOOP-18040. Use maven.test.failure.ignore instead of 
ignoreTestFailure (#3774)
7d88a54 is described below

commit 7d88a54d40c19353ebd79ab9988dcb582def2c3f
Author: Akira Ajisaka 
AuthorDate: Fri Dec 10 01:36:31 2021 +0900

HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure 
(#3774)

Reviewed-by: Masatake Iwasaki 
(cherry picked from commit 9b9e2ef87ffe7b33dc01699892acf62965fb3550)

 Conflicts:
hadoop-tools/hadoop-federation-balance/pom.xml

(cherry picked from commit 35c5c6bb83beb4f68e1ff18907589b062a0467e8)

 Conflicts:
hadoop-common-project/hadoop-registry/pom.xml

(cherry picked from commit 94ca965e21067f1d957d21f19ae8e6abca9da971)
---
 hadoop-common-project/hadoop-common/pom.xml  | 1 -
 hadoop-common-project/hadoop-kms/pom.xml | 1 -
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 2 --
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 1 -
 hadoop-project/pom.xml   | 3 +--
 hadoop-tools/hadoop-distcp/pom.xml   | 1 -
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 1 -
 7 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 4b29437..1da0b49 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -799,7 +799,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index 78e26d9..45bc8d3 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -206,7 +206,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   1
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 481f71c..2797ad3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -262,7 +262,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   600
   
@@ -409,7 +408,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   true
   
600
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index b6f19f6..97c7cec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -427,7 +427,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index d1b1f1c..6fc2e46 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -35,7 +35,7 @@
 
 false
 
-true
+true
 
true
 6.1.26
 _
@@ -1616,7 +1616,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   false
   
${surefire.fork.timeout}
   ${maven-surefire-plugin.argLine}
diff --git a/hadoop-tools/hadoop-distcp/pom.xml 
b/hadoop-tools/hadoop-distcp/pom.xml
index 0c96aed..9c0795a 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -124,7 +124,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   600
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 333fd5a..3d7e355 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -168,7 +168,6 @@
   org.apache.maven.plugins
   maven-surefire-plugin
   
-${ignoreTestFailure}
 false
 900
 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError


[hadoop] branch branch-3.2 updated: HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure (#3774)

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 94ca965  HADOOP-18040. Use maven.test.failure.ignore instead of 
ignoreTestFailure (#3774)
94ca965 is described below

commit 94ca965e21067f1d957d21f19ae8e6abca9da971
Author: Akira Ajisaka 
AuthorDate: Fri Dec 10 01:36:31 2021 +0900

HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure 
(#3774)

Reviewed-by: Masatake Iwasaki 
(cherry picked from commit 9b9e2ef87ffe7b33dc01699892acf62965fb3550)

 Conflicts:
hadoop-tools/hadoop-federation-balance/pom.xml

(cherry picked from commit 35c5c6bb83beb4f68e1ff18907589b062a0467e8)

 Conflicts:
hadoop-common-project/hadoop-registry/pom.xml
---
 hadoop-common-project/hadoop-common/pom.xml  | 1 -
 hadoop-common-project/hadoop-kms/pom.xml | 1 -
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 2 --
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 1 -
 hadoop-project/pom.xml   | 3 +--
 hadoop-tools/hadoop-distcp/pom.xml   | 1 -
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 1 -
 7 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index e0c80bc..d215b43 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -1000,7 +1000,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index bded0b5..5d3f9f6 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -186,7 +186,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   1
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index e398bfb..e951320 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -247,7 +247,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   600
   
@@ -362,7 +361,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   true
   
600
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c2b6f95..820879d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -507,7 +507,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index e33ce51..a173af8 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -35,7 +35,7 @@
 
 false
 
-true
+true
 
true
 9.4.40.v20210413
 _
@@ -1760,7 +1760,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   false
   
${surefire.fork.timeout}
   ${maven-surefire-plugin.argLine}
diff --git a/hadoop-tools/hadoop-distcp/pom.xml 
b/hadoop-tools/hadoop-distcp/pom.xml
index 72b6b5b..223f018 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -123,7 +123,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   600
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 36f6cf2..dbddbb3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -225,7 +225,6 @@
   org.apache.maven.plugins
   maven-surefire-plugin
   
-${ignoreTestFailure}
 false
 900
 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError

-
To unsubscribe, e-mail: 

[hadoop] branch branch-3.3 updated: HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure (#3774)

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 35c5c6b  HADOOP-18040. Use maven.test.failure.ignore instead of 
ignoreTestFailure (#3774)
35c5c6b is described below

commit 35c5c6bb83beb4f68e1ff18907589b062a0467e8
Author: Akira Ajisaka 
AuthorDate: Fri Dec 10 01:36:31 2021 +0900

HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure 
(#3774)

Reviewed-by: Masatake Iwasaki 
(cherry picked from commit 9b9e2ef87ffe7b33dc01699892acf62965fb3550)

 Conflicts:
hadoop-tools/hadoop-federation-balance/pom.xml
---
 hadoop-common-project/hadoop-common/pom.xml| 1 -
 hadoop-common-project/hadoop-kms/pom.xml   | 1 -
 hadoop-common-project/hadoop-registry/pom.xml  | 1 -
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 2 --
 hadoop-hdfs-project/hadoop-hdfs/pom.xml| 1 -
 hadoop-project/pom.xml | 3 +--
 hadoop-tools/hadoop-distcp/pom.xml | 1 -
 7 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 4a5808a..086a77f 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -912,7 +912,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index 692af3e..71be873 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -186,7 +186,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   1
diff --git a/hadoop-common-project/hadoop-registry/pom.xml 
b/hadoop-common-project/hadoop-registry/pom.xml
index 72abe20..5cfc2fe 100644
--- a/hadoop-common-project/hadoop-registry/pom.xml
+++ b/hadoop-common-project/hadoop-registry/pom.xml
@@ -220,7 +220,6 @@
   org.apache.maven.plugins
   maven-surefire-plugin
   
-${ignoreTestFailure}
 false
 900
 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index e5fa1c0..e571d74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -247,7 +247,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   600
   
@@ -361,7 +360,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   true
   
600
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 3ada6b8..df5d2cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -461,7 +461,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 4db0ec7..9063e302 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -35,7 +35,7 @@
 
 false
 
-<ignoreTestFailure>true</ignoreTestFailure>
+<maven.test.failure.ignore>true</maven.test.failure.ignore>
 
true
 9.4.43.v20210629
 _
@@ -2084,7 +2084,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   false
   
${surefire.fork.timeout}
   ${maven-surefire-plugin.argLine}
diff --git a/hadoop-tools/hadoop-distcp/pom.xml 
b/hadoop-tools/hadoop-distcp/pom.xml
index b438e5c..5e306ea 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -128,7 +128,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   600

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure (#3774)

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9b9e2ef  HADOOP-18040. Use maven.test.failure.ignore instead of 
ignoreTestFailure (#3774)
9b9e2ef is described below

commit 9b9e2ef87ffe7b33dc01699892acf62965fb3550
Author: Akira Ajisaka 
AuthorDate: Fri Dec 10 01:36:31 2021 +0900

HADOOP-18040. Use maven.test.failure.ignore instead of ignoreTestFailure 
(#3774)

Reviewed-by: Masatake Iwasaki 
---
 hadoop-common-project/hadoop-common/pom.xml| 1 -
 hadoop-common-project/hadoop-kms/pom.xml   | 1 -
 hadoop-common-project/hadoop-registry/pom.xml  | 1 -
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml | 2 --
 hadoop-hdfs-project/hadoop-hdfs/pom.xml| 1 -
 hadoop-project/pom.xml | 3 +--
 hadoop-tools/hadoop-distcp/pom.xml | 1 -
 hadoop-tools/hadoop-federation-balance/pom.xml | 1 -
 8 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index bcba228..a75ab5e 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -915,7 +915,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index 9de8b9c..96588a2 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -186,7 +186,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   1
diff --git a/hadoop-common-project/hadoop-registry/pom.xml 
b/hadoop-common-project/hadoop-registry/pom.xml
index 171b722..725dda5 100644
--- a/hadoop-common-project/hadoop-registry/pom.xml
+++ b/hadoop-common-project/hadoop-registry/pom.xml
@@ -231,7 +231,6 @@
   org.apache.maven.plugins
   maven-surefire-plugin
   
-${ignoreTestFailure}
 false
 900
 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 1916ef0..a1b3ab1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -247,7 +247,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   600
   
@@ -361,7 +360,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   true
   
600
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 10d66d0..bc05d85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -471,7 +471,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6b7e016..62e0472 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -35,7 +35,7 @@
 
 false
 
-<ignoreTestFailure>true</ignoreTestFailure>
+<maven.test.failure.ignore>true</maven.test.failure.ignore>
 
true
 9.4.44.v20210927
 _
@@ -2126,7 +2126,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   false
   
${surefire.fork.timeout}
   ${maven-surefire-plugin.argLine}
diff --git a/hadoop-tools/hadoop-distcp/pom.xml 
b/hadoop-tools/hadoop-distcp/pom.xml
index 7e5aaeb..55738ef 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -128,7 +128,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   600
diff --git a/hadoop-tools/hadoop-federation-balance/pom.xml 
b/hadoop-tools/hadoop-federation-balance/pom.xml
index 588bb98..71f2cb3 100644
--- a/hadoop-tools/hadoop-federation-balance/pom.xml
+++ b/hadoop-tools/hadoop-federation-balance/pom.xml
@@ -138,7 +138,6 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
-  ${ignoreTestFailure}
   1
   false
   600

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

[hadoop] branch branch-2.10 updated: HADOOP-18035. Ignore unit test failures to run all the unit tests from root (#3765)

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new eda9340  HADOOP-18035. Ignore unit test failures to run all the unit 
tests from root (#3765)
eda9340 is described below

commit eda934025bc63d0eae0672e0464697d8ccea2b41
Author: Akira Ajisaka 
AuthorDate: Fri Dec 10 01:27:27 2021 +0900

HADOOP-18035. Ignore unit test failures to run all the unit tests from root 
(#3765)

Reviewed-by: Masatake Iwasaki 
(cherry picked from commit 3854b59bd1b06e9740cf33efc7844b66eec30ba4)
---
 hadoop-common-project/hadoop-common/pom.xml  | 1 +
 hadoop-common-project/hadoop-kms/pom.xml | 1 +
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 1 +
 hadoop-project/pom.xml   | 3 +++
 hadoop-tools/hadoop-distcp/pom.xml   | 1 +
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 1 +
 7 files changed, 10 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 1da0b49..4b29437 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -799,6 +799,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index 45bc8d3..78e26d9 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -206,6 +206,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   false
   1
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 2797ad3..481f71c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -262,6 +262,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   600
   
@@ -408,6 +409,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   true
   
600
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 97c7cec..b6f19f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -427,6 +427,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 0245444..d1b1f1c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -34,6 +34,8 @@
 2021
 
 false
+
+true
 
true
 6.1.26
 _
@@ -1614,6 +1616,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   false
   
${surefire.fork.timeout}
   ${maven-surefire-plugin.argLine}
diff --git a/hadoop-tools/hadoop-distcp/pom.xml 
b/hadoop-tools/hadoop-distcp/pom.xml
index 9c0795a..0c96aed 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -124,6 +124,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   false
   600
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 3d7e355..333fd5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -168,6 +168,7 @@
   org.apache.maven.plugins
   maven-surefire-plugin
   
+${ignoreTestFailure}
 false
 900
 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-18035. Ignore unit test failures to run all the unit tests from root (#3765)

2021-12-09 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 3854b59  HADOOP-18035. Ignore unit test failures to run all the unit 
tests from root (#3765)
3854b59 is described below

commit 3854b59bd1b06e9740cf33efc7844b66eec30ba4
Author: Akira Ajisaka 
AuthorDate: Fri Dec 10 01:27:27 2021 +0900

HADOOP-18035. Ignore unit test failures to run all the unit tests from root 
(#3765)

Reviewed-by: Masatake Iwasaki 
---
 hadoop-common-project/hadoop-common/pom.xml  | 1 +
 hadoop-common-project/hadoop-kms/pom.xml | 1 +
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 1 +
 hadoop-project/pom.xml   | 3 +++
 hadoop-tools/hadoop-distcp/pom.xml   | 1 +
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 1 +
 7 files changed, 10 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index d215b43..e0c80bc 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -1000,6 +1000,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-common-project/hadoop-kms/pom.xml 
b/hadoop-common-project/hadoop-kms/pom.xml
index 5d3f9f6..bded0b5 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -186,6 +186,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   false
   1
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index e951320..e398bfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -247,6 +247,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   600
   
@@ -361,6 +362,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   true
   
600
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 820879d..c2b6f95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -507,6 +507,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd;>
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   ${testsThreadCount}
   false
   ${maven-surefire-plugin.argLine} 
-DminiClusterDedicatedDirs=true
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 0c1ad0b..e33ce51 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -34,6 +34,8 @@
 2021
 
 false
+
+true
 
true
 9.4.40.v20210413
 _
@@ -1758,6 +1760,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   false
   
${surefire.fork.timeout}
   ${maven-surefire-plugin.argLine}
diff --git a/hadoop-tools/hadoop-distcp/pom.xml 
b/hadoop-tools/hadoop-distcp/pom.xml
index 223f018..72b6b5b 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -123,6 +123,7 @@
 org.apache.maven.plugins
 maven-surefire-plugin
 
+  ${ignoreTestFailure}
   1
   false
   600
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index dbddbb3..36f6cf2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -225,6 +225,7 @@
   org.apache.maven.plugins
   maven-surefire-plugin
   
+${ignoreTestFailure}
 false
 900
 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-16333. fix balancer bug when transfer an EC block (#3777)

2021-12-09 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 72ffbd9  HDFS-16333. fix balancer bug when transfer an EC block (#3777)
72ffbd9 is described below

commit 72ffbd956a6d5090a7b28c63183b032ab5467e68
Author: qinyuren <1476659...@qq.com>
AuthorDate: Thu Dec 9 23:33:03 2021 +0800

HDFS-16333. fix balancer bug when transfer an EC block (#3777)
---
 .../hadoop/hdfs/server/balancer/Dispatcher.java|  48 +++-
 .../hadoop/hdfs/server/balancer/TestBalancer.java  | 123 -
 2 files changed, 166 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 1694a12..0581793 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -490,7 +490,7 @@ public class Dispatcher {
 
   public static class DBlockStriped extends DBlock {
 
-final byte[] indices;
+private byte[] indices;
 final short dataBlockNum;
 final int cellSize;
 
@@ -527,6 +527,29 @@ public class Dispatcher {
   }
   return block.getNumBytes();
 }
+
+public void setIndices(byte[] indices) {
+  this.indices = indices;
+}
+
+/**
+ * Adjust EC block indices,it will remove the element of adjustList from 
indices.
+ * @param adjustList the list will be removed from indices
+ */
+public void adjustIndices(List adjustList) {
+  if (adjustList.isEmpty()) {
+return;
+  }
+
+  byte[] newIndices = new byte[indices.length - adjustList.size()];
+  for (int i = 0, j = 0; i < indices.length; ++i) {
+if (!adjustList.contains(i)) {
+  newIndices[j] = indices[i];
+  ++j;
+}
+  }
+  this.indices = newIndices;
+}
   }
 
   /** The class represents a desired move. */
@@ -803,7 +826,7 @@ public class Dispatcher {
  * 
  * @return the total size of the received blocks in the number of bytes.
  */
-private long getBlockList() throws IOException {
+private long getBlockList() throws IOException, IllegalArgumentException {
   final long size = Math.min(getBlocksSize, blocksToReceive);
   final BlocksWithLocations newBlksLocs =
   nnc.getBlocks(getDatanodeInfo(), size, getBlocksMinBlockSize);
@@ -840,7 +863,14 @@ public class Dispatcher {
   synchronized (block) {
 block.clearLocations();
 
+if (blkLocs instanceof StripedBlockWithLocations) {
+  // EC block may adjust indices before, avoid repeated adjustments
+  ((DBlockStriped) block).setIndices(
+  ((StripedBlockWithLocations) blkLocs).getIndices());
+}
+
 // update locations
+List adjustList = new ArrayList<>();
 final String[] datanodeUuids = blkLocs.getDatanodeUuids();
 final StorageType[] storageTypes = blkLocs.getStorageTypes();
 for (int i = 0; i < datanodeUuids.length; i++) {
@@ -848,8 +878,20 @@ public class Dispatcher {
   datanodeUuids[i], storageTypes[i]);
   if (g != null) { // not unknown
 block.addLocation(g);
+  } else if (blkLocs instanceof StripedBlockWithLocations) {
+// some datanode may not in storageGroupMap due to 
decommission operation
+// or balancer cli with "-exclude" parameter
+adjustList.add(i);
   }
 }
+
+if (!adjustList.isEmpty()) {
+  // block.locations mismatch with block.indices
+  // adjust indices to get correct internalBlock for Datanode in 
#getInternalBlock
+  ((DBlockStriped) block).adjustIndices(adjustList);
+  Preconditions.checkArgument(((DBlockStriped) 
block).indices.length
+  == block.locations.size());
+}
   }
   if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
 if (LOG.isTraceEnabled()) {
@@ -969,7 +1011,7 @@ public class Dispatcher {
 }
 blocksToReceive -= received;
 continue;
-  } catch (IOException e) {
+  } catch (IOException | IllegalArgumentException e) {
 LOG.warn("Exception while getting reportedBlock list", e);
 return;
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 

[hadoop] branch branch-3.3 updated: HDFS-16333. fix balancer bug when transfer an EC block (#3777)

2021-12-09 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 0dfb4eb  HDFS-16333. fix balancer bug when transfer an EC block (#3777)
0dfb4eb is described below

commit 0dfb4eb6029d52a0fed10c69a499cbefea0af53e
Author: qinyuren <1476659...@qq.com>
AuthorDate: Thu Dec 9 23:33:03 2021 +0800

HDFS-16333. fix balancer bug when transfer an EC block (#3777)
---
 .../hadoop/hdfs/server/balancer/Dispatcher.java|  48 +++-
 .../hadoop/hdfs/server/balancer/TestBalancer.java  | 123 -
 2 files changed, 166 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 5411b5c..8be3fb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -491,7 +491,7 @@ public class Dispatcher {
 
   public static class DBlockStriped extends DBlock {
 
-final byte[] indices;
+private byte[] indices;
 final short dataBlockNum;
 final int cellSize;
 
@@ -528,6 +528,29 @@ public class Dispatcher {
   }
   return block.getNumBytes();
 }
+
+public void setIndices(byte[] indices) {
+  this.indices = indices;
+}
+
+/**
+ * Adjust EC block indices,it will remove the element of adjustList from 
indices.
+ * @param adjustList the list will be removed from indices
+ */
+public void adjustIndices(List adjustList) {
+  if (adjustList.isEmpty()) {
+return;
+  }
+
+  byte[] newIndices = new byte[indices.length - adjustList.size()];
+  for (int i = 0, j = 0; i < indices.length; ++i) {
+if (!adjustList.contains(i)) {
+  newIndices[j] = indices[i];
+  ++j;
+}
+  }
+  this.indices = newIndices;
+}
   }
 
   /** The class represents a desired move. */
@@ -804,7 +827,7 @@ public class Dispatcher {
  * 
  * @return the total size of the received blocks in the number of bytes.
  */
-private long getBlockList() throws IOException {
+private long getBlockList() throws IOException, IllegalArgumentException {
   final long size = Math.min(getBlocksSize, blocksToReceive);
   final BlocksWithLocations newBlksLocs =
   nnc.getBlocks(getDatanodeInfo(), size, getBlocksMinBlockSize);
@@ -841,7 +864,14 @@ public class Dispatcher {
   synchronized (block) {
 block.clearLocations();
 
+if (blkLocs instanceof StripedBlockWithLocations) {
+  // EC block may adjust indices before, avoid repeated adjustments
+  ((DBlockStriped) block).setIndices(
+  ((StripedBlockWithLocations) blkLocs).getIndices());
+}
+
 // update locations
+List adjustList = new ArrayList<>();
 final String[] datanodeUuids = blkLocs.getDatanodeUuids();
 final StorageType[] storageTypes = blkLocs.getStorageTypes();
 for (int i = 0; i < datanodeUuids.length; i++) {
@@ -849,8 +879,20 @@ public class Dispatcher {
   datanodeUuids[i], storageTypes[i]);
   if (g != null) { // not unknown
 block.addLocation(g);
+  } else if (blkLocs instanceof StripedBlockWithLocations) {
+// some datanode may not in storageGroupMap due to 
decommission operation
+// or balancer cli with "-exclude" parameter
+adjustList.add(i);
   }
 }
+
+if (!adjustList.isEmpty()) {
+  // block.locations mismatch with block.indices
+  // adjust indices to get correct internalBlock for Datanode in 
#getInternalBlock
+  ((DBlockStriped) block).adjustIndices(adjustList);
+  Preconditions.checkArgument(((DBlockStriped) 
block).indices.length
+  == block.locations.size());
+}
   }
   if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
 if (LOG.isTraceEnabled()) {
@@ -970,7 +1012,7 @@ public class Dispatcher {
 }
 blocksToReceive -= received;
 continue;
-  } catch (IOException e) {
+  } catch (IOException | IllegalArgumentException e) {
 LOG.warn("Exception while getting reportedBlock list", e);
 return;
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 

[hadoop] branch branch-3.3 updated: YARN-11020. [UI2] No container is found for an application attempt with a single AM container. Contributed by Andras Gyori

2021-12-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 8053b0f  YARN-11020. [UI2] No container is found for an application 
attempt with a single AM container. Contributed by Andras Gyori
8053b0f is described below

commit 8053b0f2059c3ddfb80ece12c08d545a3a24eba6
Author: Szilard Nemeth 
AuthorDate: Thu Dec 9 13:02:55 2021 +0100

YARN-11020. [UI2] No container is found for an application attempt with a 
single AM container. Contributed by Andras Gyori
---
 .../src/main/webapp/app/serializers/yarn-jhs-container.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-container.js
index 599cf7f..8aebb71 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-jhs-container.js
@@ -45,8 +45,8 @@ export default DS.JSONAPISerializer.extend({
   },
 
   normalizeArrayResponse(store, primaryModelClass, payload/*, id, 
requestType*/) {
-
-payload = payload["containerLogsInfo"]
+// Handling single container and multiple containers case at the same time
+payload = [].concat(payload["containerLogsInfo"]);
 
 var normalizedArrayResponse = {
   data: []

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-17643 WASB : Make metadata checks case insensitive (#3102)

2021-12-09 Thread surendralilhore
This is an automated email from the ASF dual-hosted git repository.

surendralilhore pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 84cab2e  HADOOP-17643 WASB : Make metadata checks case insensitive 
(#3102)
84cab2e is described below

commit 84cab2ee2924474bcc19e2733610ffa9a9e8175a
Author: Anoop Sam John 
AuthorDate: Thu Dec 9 14:45:48 2021 +0530

HADOOP-17643 WASB : Make metadata checks case insensitive (#3102)
---
 .../fs/azure/AzureNativeFileSystemStore.java   | 76 +++---
 1 file changed, 52 insertions(+), 24 deletions(-)

diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 414d2f2..627618c 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -40,6 +40,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.commons.lang3.StringUtils;
@@ -180,6 +181,11 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
*/
   public static final String KEY_USE_LOCAL_SAS_KEY_MODE = 
"fs.azure.local.sas.key.mode";
 
+  /**
+   * Config to control case sensitive metadata key checks/retrieval. If this
+   * is false, blob metadata keys will be treated case insensitive.
+   */
+  private static final String KEY_BLOB_METADATA_KEY_CASE_SENSITIVE = 
"fs.azure.blob.metadata.key.case.sensitive";
   private static final String PERMISSION_METADATA_KEY = "hdi_permission";
   private static final String OLD_PERMISSION_METADATA_KEY = "asv_permission";
   private static final String IS_FOLDER_METADATA_KEY = "hdi_isfolder";
@@ -353,6 +359,8 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 
   private String delegationToken;
 
+  private boolean metadataKeyCaseSensitive;
+
   /** The error message template when container is not accessible. */
   public static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials 
found for "
   + "account %s in the configuration, and its container %s is not "
@@ -574,6 +582,12 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   LOG.warn("Unable to initialize HBase root as an atomic rename 
directory.");
 }
 LOG.debug("Atomic rename directories: {} ", setToString(atomicRenameDirs));
+metadataKeyCaseSensitive = conf
+.getBoolean(KEY_BLOB_METADATA_KEY_CASE_SENSITIVE, true);
+if (!metadataKeyCaseSensitive) {
+  LOG.info("{} configured as false. Blob metadata will be treated case 
insensitive.",
+  KEY_BLOB_METADATA_KEY_CASE_SENSITIVE);
+}
   }
 
   /**
@@ -1618,15 +1632,24 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 blob.setMetadata(metadata);
   }
 
-  private static String getMetadataAttribute(CloudBlobWrapper blob,
+  private String getMetadataAttribute(HashMap metadata,
   String... keyAlternatives) {
-HashMap metadata = blob.getMetadata();
 if (null == metadata) {
   return null;
 }
 for (String key : keyAlternatives) {
-  if (metadata.containsKey(key)) {
-return metadata.get(key);
+  if (metadataKeyCaseSensitive) {
+if (metadata.containsKey(key)) {
+  return metadata.get(key);
+}
+  } else {
+// See HADOOP-17643 for details on why this case insensitive metadata
+// checks been added
+for (Entry entry : metadata.entrySet()) {
+  if (key.equalsIgnoreCase(entry.getKey())) {
+return entry.getValue();
+  }
+}
   }
 }
 return null;
@@ -1650,7 +1673,7 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
   }
 
   private PermissionStatus getPermissionStatus(CloudBlobWrapper blob) {
-String permissionMetadataValue = getMetadataAttribute(blob,
+String permissionMetadataValue = getMetadataAttribute(blob.getMetadata(),
 PERMISSION_METADATA_KEY, OLD_PERMISSION_METADATA_KEY);
 if (permissionMetadataValue != null) {
   return PermissionStatusJsonSerializer.fromJSONString(
@@ -1698,19 +1721,32 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
   }
 
-  private static String getLinkAttributeValue(CloudBlobWrapper blob)
+  private String getLinkAttributeValue(CloudBlobWrapper blob)
   throws UnsupportedEncodingException {
-String encodedLinkTarget = getMetadataAttribute(blob,
+String encodedLinkTarget =