[hadoop] branch trunk updated: HADOOP-18112: Implement paging during multi object delete. (#4045)

2022-03-10 Thread mthakur
This is an automated email from the ASF dual-hosted git repository.

mthakur pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 672e380  HADOOP-18112: Implement paging during multi object delete. 
(#4045)
672e380 is described below

commit 672e380c4f6ffcb0a6fee6d8263166e16b4323c2
Author: Mukund Thakur 
AuthorDate: Fri Mar 11 13:05:45 2022 +0530

HADOOP-18112: Implement paging during multi object delete. (#4045)


Multi object delete of size more than 1000 is not supported by S3 and
fails with MalformedXML error. So implementing paging of requests to
reduce the number of keys in a single request. Page size can be configured
using "fs.s3a.bulk.delete.page.size"

 Contributed By: Mukund Thakur
---
 .../main/java/org/apache/hadoop/util/Lists.java|  24 +
 .../java/org/apache/hadoop/util/TestLists.java |  44 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java|  78 ++-
 .../apache/hadoop/fs/s3a/api/RequestFactory.java   |   5 +-
 .../apache/hadoop/fs/s3a/impl/DeleteOperation.java |  45 ++---
 .../hadoop/fs/s3a/impl/OperationCallbacks.java |  12 +--
 .../apache/hadoop/fs/s3a/impl/RenameOperation.java |   8 +-
 .../hadoop/fs/s3a/impl/RequestFactoryImpl.java |   5 +-
 .../org/apache/hadoop/fs/s3a/tools/MarkerTool.java |   2 +-
 .../hadoop/fs/s3a/tools/MarkerToolOperations.java  |   9 +-
 .../fs/s3a/tools/MarkerToolOperationsImpl.java |  10 +-
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java |  38 +++-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java | 107 -
 .../fs/s3a/impl/ITestPartialRenamesDeletes.java| 105 
 .../hadoop/fs/s3a/impl/TestRequestFactory.java |   2 +-
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java  |   2 +-
 .../fs/s3a/test/MinimalOperationCallbacks.java |   9 +-
 17 files changed, 273 insertions(+), 232 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java
index b6d74ee..5d9cc05 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java
@@ -232,4 +232,28 @@ public final class Lists {
 return addAll(addTo, elementsToAdd.iterator());
   }
 
+  /**
+   * Returns consecutive sub-lists of a list, each of the same size
+   * (the final list may be smaller).
+   * @param originalList original big list.
+   * @param pageSize desired size of each sublist ( last one
+   * may be smaller)
+   * @return a list of sub lists.
+   */
+  public static <T> List<List<T>> partition(List<T> originalList, int pageSize) {
+
+Preconditions.checkArgument(originalList != null && originalList.size() > 
0,
+"Invalid original list");
+Preconditions.checkArgument(pageSize > 0, "Page size should " +
+"be greater than 0 for performing partition");
+
+List<List<T>> result = new ArrayList<>();
+int i=0;
+while (i < originalList.size()) {
+  result.add(originalList.subList(i,
+  Math.min(i + pageSize, originalList.size())));
+  i = i + pageSize;
+}
+return result;
+  }
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java
index 537e378..53241da 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java
@@ -18,9 +18,11 @@
 
 package org.apache.hadoop.util;
 
+import org.assertj.core.api.Assertions;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -80,6 +82,48 @@ public class TestLists {
   }
 
   @Test
+  public void testListsPartition() {
+List<String> list = new ArrayList<>();
+list.add("a");
+list.add("b");
+list.add("c");
+list.add("d");
+list.add("e");
+List<List<String>> res = Lists.
+partition(list, 2);
+Assertions.assertThat(res)
+.describedAs("Number of partitions post partition")
+.hasSize(3);
+Assertions.assertThat(res.get(0))
+.describedAs("Number of elements in first partition")
+.hasSize(2);
+Assertions.assertThat(res.get(2))
+.describedAs("Number of elements in last partition")
+.hasSize(1);
+
+List<List<String>> res2 = Lists.
+partition(list, 1);
+Assertions.assertThat(res2)
+.describedAs("Number of partitions post partition")
+.hasSize(5);
+Assertions.assertThat(res2.get(0))
+

[hadoop] branch trunk updated: YARN-11067. Resource overcommitment due to incorrect resource normalisation logical order. Contributed by Andras Gyori

2022-03-10 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ed65aa2  YARN-11067. Resource overcommitment due to incorrect resource 
normalisation logical order. Contributed by Andras Gyori
ed65aa2 is described below

commit ed65aa23240b3dd6b56e86e5f0e9d38069fb3b01
Author: Szilard Nemeth 
AuthorDate: Thu Mar 10 22:22:58 2022 +0100

YARN-11067. Resource overcommitment due to incorrect resource normalisation 
logical order. Contributed by Andras Gyori
---
 .../scheduler/capacity/ParentQueue.java| 37 -
 .../TestAbsoluteResourceConfiguration.java | 47 ++
 2 files changed, 64 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index c624aab..87ebc0b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -1294,17 +1294,24 @@ public class ParentQueue extends AbstractCSQueue {
 
   private void calculateEffectiveResourcesAndCapacity(String label,
   Resource clusterResource) {
+// Update effective resources for my self;
+if (rootQueue) {
+  Resource resourceByLabel = labelManager.getResourceByLabel(label, 
clusterResource);
+  usageTracker.getQueueResourceQuotas().setEffectiveMinResource(label, 
resourceByLabel);
+  usageTracker.getQueueResourceQuotas().setEffectiveMaxResource(label, 
resourceByLabel);
+} else {
+  super.updateEffectiveResources(clusterResource);
+}
+
+recalculateEffectiveMinRatio(label, clusterResource);
+  }
+
+  private void recalculateEffectiveMinRatio(String label, Resource 
clusterResource) {
 // For root queue, ensure that max/min resource is updated to latest
 // cluster resource.
-Resource resourceByLabel = labelManager.getResourceByLabel(label,
-clusterResource);
-
-/*
- * == Below logic are added to calculate effectiveMinRatioPerResource ==
- */
+Resource resourceByLabel = labelManager.getResourceByLabel(label, 
clusterResource);
 
-// Total configured min resources of direct children of this given parent
-// queue
+// Total configured min resources of direct children of this given parent 
queue
 Resource configuredMinResources = Resource.newInstance(0L, 0);
 for (CSQueue childQueue : getChildQueues()) {
   Resources.addTo(configuredMinResources,
@@ -1312,8 +1319,7 @@ public class ParentQueue extends AbstractCSQueue {
 }
 
 // Factor to scale down effective resource: When cluster has sufficient
-// resources, effective_min_resources will be same as configured
-// min_resources.
+// resources, effective_min_resources will be same as configured 
min_resources.
 Resource numeratorForMinRatio = null;
 if (getQueuePath().equals("root")) {
   if (!resourceByLabel.equals(Resources.none()) && 
Resources.lessThan(resourceCalculator,
@@ -1324,21 +1330,12 @@ public class ParentQueue extends AbstractCSQueue {
   if (Resources.lessThan(resourceCalculator, clusterResource,
   usageTracker.getQueueResourceQuotas().getEffectiveMinResource(label),
   configuredMinResources)) {
-numeratorForMinRatio = usageTracker.getQueueResourceQuotas()
-.getEffectiveMinResource(label);
+numeratorForMinRatio = 
usageTracker.getQueueResourceQuotas().getEffectiveMinResource(label);
   }
 }
 
 effectiveMinResourceRatio.put(label, getEffectiveMinRatio(
 configuredMinResources, numeratorForMinRatio));
-
-// Update effective resources for my self;
-if (rootQueue) {
-  usageTracker.getQueueResourceQuotas().setEffectiveMinResource(label, 
resourceByLabel);
-  usageTracker.getQueueResourceQuotas().setEffectiveMaxResource(label, 
resourceByLabel);
-} else{
-  super.updateEffectiveResources(clusterResource);
-}
   }
 
   private Map getEffectiveMinRatio(
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAbsoluteResourceConfiguration.java
 

[hadoop] branch trunk updated: YARN-10049. FIFOOrderingPolicy Improvements. Contributed by Benjamin Teke

2022-03-10 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 481da19  YARN-10049. FIFOOrderingPolicy Improvements. Contributed by 
Benjamin Teke
481da19 is described below

commit 481da19494fe13ca42651305b519e0670cafabf0
Author: Szilard Nemeth 
AuthorDate: Thu Mar 10 22:15:35 2022 +0100

YARN-10049. FIFOOrderingPolicy Improvements. Contributed by Benjamin Teke
---
 .../scheduler/policy/FifoComparator.java   |  5 ++
 .../scheduler/policy/TestFifoOrderingPolicy.java   | 91 --
 2 files changed, 71 insertions(+), 25 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
index 112c50f..c62b738 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
@@ -29,6 +29,11 @@ public class FifoComparator
 @Override
   public int compare(SchedulableEntity r1, SchedulableEntity r2) {
 int res = r1.compareInputOrderTo(r2);
+
+if (res == 0) {
+  res = (int) Math.signum(r1.getStartTime() - r2.getStartTime());
+}
+
 return res;
   }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
index 7ec2c01..62bc712 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
@@ -18,16 +18,19 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
 
-import java.util.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
-import org.junit.Assert;
-import org.junit.Test;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
 
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.junit.Assert;
+import org.junit.Test;
 
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class TestFifoOrderingPolicy {
+public
+class TestFifoOrderingPolicy {
   
   @Test
   public void testFifoOrderingPolicy() {
@@ -36,13 +39,17 @@ public class TestFifoOrderingPolicy {
 MockSchedulableEntity r1 = new MockSchedulableEntity();
 MockSchedulableEntity r2 = new MockSchedulableEntity();
 
-assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(0);
+assertEquals("The comparator should return 0 because the entities are 
created with " +
+"the same values.", 0,
+policy.getComparator().compare(r1, r2));
 
 r1.setSerial(1);
-assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(1);
+assertEquals("The lhs entity has a larger serial, the comparator return " +
+"value should be 1.", 1, policy.getComparator().compare(r1, r2));
 
 r2.setSerial(2);
-assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(-1);
+Assert.assertEquals("The rhs entity has a larger serial, the comparator 
return " +
+"value should be -1.", -1, policy.getComparator().compare(r1, r2));
   }
   
   @Test
@@ -63,46 +70,80 @@ public class TestFifoOrderingPolicy {
 schedOrder.addSchedulableEntity(msp3);
 
 //Assignment, oldest to youngest
-
checkSerials(schedOrder.getAssignmentIterator(IteratorSelector.EMPTY_ITERATOR_SELECTOR),
 new long[]{1, 2, 3});
+checkSerials(Arrays.asList(1L, 2L, 3L), schedOrder.getAssignmentIterator(
+IteratorSelector.EMPTY_ITERATOR_SELECTOR));
 
 //Preemption, youngest to oldest
-checkSerials(schedOrder.getPreemptionIterator(), new long[]{3, 2, 1});
+checkSerials(Arrays.asList(3L, 2L, 1L), 
schedOrder.getPreemptionIterator());
   }
   
-  public void checkSerials(Iterator<SchedulableEntity> si,
-  long[] serials) {
-for (int i = 0;i < 

[hadoop] branch trunk updated: YARN-11036. Do not inherit from TestRMWebServicesCapacitySched. Contributed by Tamas Domok

2022-03-10 Thread quapaw
This is an automated email from the ASF dual-hosted git repository.

quapaw pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 383b734  YARN-11036. Do not inherit from 
TestRMWebServicesCapacitySched. Contributed by Tamas Domok
383b734 is described below

commit 383b73417df80028011d229dce9daf8e4ecbdb49
Author: 9uapaw 
AuthorDate: Thu Mar 10 13:11:19 2022 +0100

YARN-11036. Do not inherit from TestRMWebServicesCapacitySched. Contributed 
by Tamas Domok
---
 .../webapp/TestRMWebServicesCapacitySched.java | 47 ++--
 ...estRMWebServicesCapacitySchedDynamicConfig.java | 65 ++
 .../TestRMWebServicesSchedulerActivities.java  | 31 +--
 3 files changed, 73 insertions(+), 70 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index 2be9b54..b9ce10a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -77,42 +77,37 @@ import static org.junit.Assert.assertEquals;
 
 public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
-  protected static MockRM rm;
+  private MockRM rm;
+
+  public static class WebServletModule extends ServletModule {
+private final MockRM rm;
+
+WebServletModule(MockRM rm) {
+  this.rm = rm;
+}
 
-  private static class WebServletModule extends ServletModule {
 @Override
 protected void configureServlets() {
   bind(JAXBContextResolver.class);
   bind(RMWebServices.class);
   bind(GenericExceptionHandler.class);
-  CapacitySchedulerConfiguration csConf = new 
CapacitySchedulerConfiguration(
-  new Configuration(false));
-  setupQueueConfiguration(csConf);
-  YarnConfiguration conf = new YarnConfiguration(csConf);
-  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
-  ResourceScheduler.class);
-  conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
-  YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
-  rm = new MockRM(conf);
   bind(ResourceManager.class).toInstance(rm);
   serve("/*").with(GuiceContainer.class);
 }
   }
 
   public TestRMWebServicesCapacitySched() {
-super(new WebAppDescriptor.Builder(
-"org.apache.hadoop.yarn.server.resourcemanager.webapp")
-.contextListenerClass(GuiceServletConfig.class)
-.filterClass(com.google.inject.servlet.GuiceFilter.class)
-.contextPath("jersey-guice-filter").servletPath("/").build());
+super(createWebAppDescriptor());
   }
 
   @Before
   @Override
   public void setUp() throws Exception {
 super.setUp();
+rm = createMockRM(new CapacitySchedulerConfiguration(
+new Configuration(false)));
 GuiceServletConfig.setInjector(
-Guice.createInjector(new WebServletModule()));
+Guice.createInjector(new WebServletModule(rm)));
   }
 
   public static void setupQueueConfiguration(
@@ -389,4 +384,22 @@ public class TestRMWebServicesCapacitySched extends 
JerseyTestBase {
   Assert.fail("overwrite should not fail " + e.getMessage());
 }
   }
+
+  public static WebAppDescriptor createWebAppDescriptor() {
+return new WebAppDescriptor.Builder(
+TestRMWebServicesCapacitySched.class.getPackage().getName())
+.contextListenerClass(GuiceServletConfig.class)
+.filterClass(com.google.inject.servlet.GuiceFilter.class)
+.contextPath("jersey-guice-filter").servletPath("/").build();
+  }
+
+  public static MockRM createMockRM(CapacitySchedulerConfiguration csConf) {
+setupQueueConfiguration(csConf);
+YarnConfiguration conf = new YarnConfiguration(csConf);
+conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ResourceScheduler.class);
+conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
+return new MockRM(conf);
+  }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java
 

[hadoop] branch branch-3.2.3 updated: HADOOP-17718. Explicitly set locale in the Dockerfile. (#3034)

2022-03-10 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-3.2.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2.3 by this push:
 new b3a75fd  HADOOP-17718. Explicitly set locale in the Dockerfile. (#3034)
b3a75fd is described below

commit b3a75fd00f5cab38933f1a730b36ba77390310c7
Author: Wei-Chiu Chuang 
AuthorDate: Fri May 21 09:26:50 2021 -0700

HADOOP-17718. Explicitly set locale in the Dockerfile. (#3034)

Reviewed-by: Akira Ajisaka 
(cherry picked from commit ad923ad5642b2b11357fbee4277f3435300a19c5)

Conflicts:
dev-support/docker/Dockerfile_centos_7
dev-support/docker/Dockerfile_centos_8
(cherry picked from commit 5e09d38b2505db70c712b8b6c93635220325156f)

Conflicts:
dev-support/docker/Dockerfile_aarch64
(cherry picked from commit c531613cc9a52c9348ec11a93317be41f674d4d7)
---
 dev-support/docker/Dockerfile | 4 
 1 file changed, 4 insertions(+)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index bb06723..2c7015c 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -85,6 +85,10 @@ RUN apt-get -q update \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*
 
+RUN locale-gen en_US.UTF-8
+ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
+ENV PYTHONIOENCODING=utf-8
+
 ##
 # Set env vars required to build Hadoop
 ##

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-17718. Explicitly set locale in the Dockerfile. (#3034)

2022-03-10 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new c531613  HADOOP-17718. Explicitly set locale in the Dockerfile. (#3034)
c531613 is described below

commit c531613cc9a52c9348ec11a93317be41f674d4d7
Author: Wei-Chiu Chuang 
AuthorDate: Fri May 21 09:26:50 2021 -0700

HADOOP-17718. Explicitly set locale in the Dockerfile. (#3034)

Reviewed-by: Akira Ajisaka 
(cherry picked from commit ad923ad5642b2b11357fbee4277f3435300a19c5)

Conflicts:
dev-support/docker/Dockerfile_centos_7
dev-support/docker/Dockerfile_centos_8
(cherry picked from commit 5e09d38b2505db70c712b8b6c93635220325156f)

Conflicts:
dev-support/docker/Dockerfile_aarch64
---
 dev-support/docker/Dockerfile | 4 
 1 file changed, 4 insertions(+)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index bb06723..2c7015c 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -85,6 +85,10 @@ RUN apt-get -q update \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*
 
+RUN locale-gen en_US.UTF-8
+ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en' LC_ALL='en_US.UTF-8'
+ENV PYTHONIOENCODING=utf-8
+
 ##
 # Set env vars required to build Hadoop
 ##

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2.3 updated: YARN-9783. Remove low-level zookeeper test to be able to build Hadoop against zookeeper 3.5.5. Contributed by Mate Szalay-Beko.

2022-03-10 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-3.2.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2.3 by this push:
 new 4a51bd8  YARN-9783. Remove low-level zookeeper test to be able to 
build Hadoop against zookeeper 3.5.5. Contributed by Mate Szalay-Beko.
4a51bd8 is described below

commit 4a51bd8079d0cacdb7b28d4fc6632e0ab82f691e
Author: Akira Ajisaka 
AuthorDate: Fri Aug 30 10:13:10 2019 +0900

YARN-9783. Remove low-level zookeeper test to be able to build Hadoop 
against zookeeper 3.5.5. Contributed by Mate Szalay-Beko.

(cherry picked from commit dc0acceabb6a5189974109cfea09f598c2a99d14)
---
 .../hadoop/registry/secure/TestSecureRegistry.java | 34 --
 1 file changed, 34 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
index 9d5848e..27d32ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
@@ -24,16 +24,12 @@ import 
org.apache.hadoop.registry.client.impl.zk.ZKPathDumper;
 import org.apache.hadoop.registry.client.impl.zk.CuratorService;
 import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
 import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.Login;
-import org.apache.zookeeper.server.ZooKeeperSaslServer;
-import org.apache.zookeeper.server.auth.SaslServerCallbackHandler;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
 
 import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
@@ -58,36 +54,6 @@ public class TestSecureRegistry extends 
AbstractSecureRegistryTest {
 RegistrySecurity.clearZKSaslClientProperties();
   }
 
-  /**
-  * this is a cut and paste of some of the ZK internal code that was
-   * failing on windows and swallowing its exceptions
-   */
-  @Test
-  public void testLowlevelZKSaslLogin() throws Throwable {
-RegistrySecurity.bindZKToServerJAASContext(ZOOKEEPER_SERVER_CONTEXT);
-String serverSection =
-System.getProperty(ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
-ZooKeeperSaslServer.DEFAULT_LOGIN_CONTEXT_NAME);
-assertEquals(ZOOKEEPER_SERVER_CONTEXT, serverSection);
-
-AppConfigurationEntry entries[];
-entries = javax.security.auth.login.Configuration.getConfiguration()
- .getAppConfigurationEntry(
- serverSection);
-
-assertNotNull("null entries", entries);
-
-SaslServerCallbackHandler saslServerCallbackHandler =
-new SaslServerCallbackHandler(
-javax.security.auth.login.Configuration.getConfiguration());
-Login login = new Login(serverSection, saslServerCallbackHandler);
-try {
-  login.startThreadIfNeeded();
-} finally {
-  login.shutdown();
-}
-  }
-
   @Test
   public void testCreateSecureZK() throws Throwable {
 startSecureZK();

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org