[hadoop] branch branch-3.3 updated (054afa11808 -> 3400e8257e1)

2023-04-03 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 054afa11808 HADOOP-18647. x-ms-client-request-id to identify the retry 
of an API. (#5437)
 add 3400e8257e1 HADOOP-18680: Insufficient heap during full test runs in 
Docker container on branch-3.3.

No new revisions were added by this update.

Summary of changes:
 dev-support/docker/Dockerfile | 2 +-
 dev-support/docker/Dockerfile_aarch64 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (811441d5bc4 -> 14c5810d5ef)

2023-04-03 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 811441d5bc4 HDFS-16951. Add description of GETSERVERDEFAULTS to 
WebHDFS doc (#5491)
 add 14c5810d5ef HADOOP-18680: Insufficient heap during full test runs in 
Docker container.

No new revisions were added by this update.

Summary of changes:
 dev-support/docker/Dockerfile_aarch64 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-18582. skip unnecessary cleanup logic in distcp (#5251)

2023-01-24 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 5cd006455d3 HADOOP-18582. skip unnecessary cleanup logic in distcp 
(#5251)
5cd006455d3 is described below

commit 5cd006455d3318f186f5d57df1f3e8209664b1d7
Author: kevin wan <610379...@qq.com>
AuthorDate: Wed Jan 25 07:49:32 2023 +0800

HADOOP-18582. skip unnecessary cleanup logic in distcp (#5251)

Co-authored-by: 万康 
Reviewed-by: Steve Loughran 
Signed-off-by: Ayush Saxena 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 3b7b79b37ae1045e413de309789fbb400817a081)
---
 .../apache/hadoop/tools/mapred/CopyCommitter.java  | 13 +++-
 .../hadoop/tools/mapred/TestCopyCommitter.java | 71 ++
 2 files changed, 82 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 2272781f724..e5c74094e90 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -149,9 +149,18 @@ public class CopyCommitter extends FileOutputCommitter {
   }
 
   private void cleanupTempFiles(JobContext context) {
-try {
-  Configuration conf = context.getConfiguration();
+Configuration conf = context.getConfiguration();
+
+final boolean directWrite = conf.getBoolean(
+DistCpOptionSwitch.DIRECT_WRITE.getConfigLabel(), false);
+final boolean append = conf.getBoolean(
+DistCpOptionSwitch.APPEND.getConfigLabel(), false);
+final boolean useTempTarget = !append && !directWrite;
+if (!useTempTarget) {
+  return;
+}
 
+try {
   Path targetWorkPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
   FileSystem targetFS = targetWorkPath.getFileSystem(conf);
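A condensed, stand-alone sketch of the guard above: cleanup only makes sense when DistCp staged data in a temporary target, i.e. when neither -append nor -direct was used. The property names below are illustrative stand-ins for the DistCpOptionSwitch config labels read by the real committer.

import java.util.Properties;

public class CleanupGuardSketch {

  // Mirrors useTempTarget in the patch: true only when a temp target exists.
  static boolean useTempTarget(Properties conf) {
    boolean directWrite = Boolean.parseBoolean(
        conf.getProperty("distcp.direct.write", "false"));   // illustrative key
    boolean append = Boolean.parseBoolean(
        conf.getProperty("distcp.append", "false"));         // illustrative key
    // -append and -direct both write straight to the final target,
    // so there is no temp directory left to delete.
    return !append && !directWrite;
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty("distcp.direct.write", "true");
    // Prints false: cleanupTempFiles() would return early in this case.
    System.out.println(useTempTarget(conf));
  }
}

Returning early this way spares -append and -direct jobs a needless recursive delete against the target filesystem.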
 
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
index 599f3ec2db6..bda80a3d25e 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.tools.mapred;
 
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -580,6 +581,76 @@ public class TestCopyCommitter {
 }
   }
 
+  @Test
+  public void testCommitWithCleanupTempFiles() throws IOException {
+testCommitWithCleanup(true, false);
+testCommitWithCleanup(false, true);
+testCommitWithCleanup(true, true);
+testCommitWithCleanup(false, false);
+  }
+
+  private void testCommitWithCleanup(boolean append, boolean directWrite) throws IOException {
+TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
+JobID jobID = taskAttemptContext.getTaskAttemptID().getJobID();
+JobContext jobContext = new JobContextImpl(
+taskAttemptContext.getConfiguration(),
+jobID);
+Configuration conf = jobContext.getConfiguration();
+
+String sourceBase;
+String targetBase;
+FileSystem fs = null;
+try {
+  fs = FileSystem.get(conf);
+  sourceBase = "/tmp1/" + rand.nextLong();
+  targetBase = "/tmp1/" + rand.nextLong();
+
+  DistCpOptions options = new DistCpOptions.Builder(
+  Collections.singletonList(new Path(sourceBase)),
+  new Path("/out"))
+  .withAppend(append)
+  .withSyncFolder(true)
+  .withDirectWrite(directWrite)
+  .build();
+  options.appendToConf(conf);
+
+  DistCpContext context = new DistCpContext(options);
+  context.setTargetPathExists(false);
+
+
+  conf.set(CONF_LABEL_TARGET_WORK_PATH, targetBase);
+  conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase);
+
+  Path tempFilePath = getTempFile(targetBase, taskAttemptContext);
+  createDirectory(fs, tempFilePath);
+
+  OutputCommitter committer = new CopyCommitter(
+  null, taskAttemptContext);
+  committer.commitJob(jobContext);
+
+  if (append || directWrite) {
+ContractTestUtils.assertPathExists(fs, "Temp files should not be cleanup with append or direct option",
+tempFilePath);
+  } else {
+ContractTestUtils.assertPathDoesNotExist(
+fs,
+"Temp files should be clean up without append or direct o

[hadoop] branch trunk updated (3f767a61b1d -> 3b7b79b37ae)

2023-01-24 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 3f767a61b1d YARN-8900. [Follow Up] Fix 
FederationInterceptorREST#invokeConcurrent Inaccurate Order of Subclusters. 
(#5260)
 add 3b7b79b37ae HADOOP-18582. skip unnecessary cleanup logic in distcp 
(#5251)

No new revisions were added by this update.

Summary of changes:
 .../apache/hadoop/tools/mapred/CopyCommitter.java  | 13 +++-
 .../hadoop/tools/mapred/TestCopyCommitter.java | 71 ++
 2 files changed, 82 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HDFS-16891 Avoid the overhead of copy-on-write exception list while loading inodes sub sections in parallel (#5300)

2023-01-18 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 63443be5f4d HDFS-16891 Avoid the overhead of copy-on-write exception 
list while loading inodes sub sections in parallel (#5300)
63443be5f4d is described below

commit 63443be5f4ddb2751c0bb1898362b4be8f9909a2
Author: Viraj Jasani 
AuthorDate: Wed Jan 18 13:13:41 2023 -0800

HDFS-16891 Avoid the overhead of copy-on-write exception list while loading 
inodes sub sections in parallel (#5300)

Reviewed-by: Stephen O'Donnell 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 04f3573f6ad07c64c1e2cf8a82fb3e9263366541)
---
 .../hadoop/hdfs/server/namenode/FSImageFormatPBINode.java | 11 ---
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 0a69c99cab8..26df995e552 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -23,9 +23,9 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -227,8 +227,7 @@ public final class FSImageFormatPBINode {
   LOG.info("Loading the INodeDirectory section in parallel with {} sub-" +
   "sections", sections.size());
   CountDownLatch latch = new CountDownLatch(sections.size());
-  final CopyOnWriteArrayList<IOException> exceptions =
-  new CopyOnWriteArrayList<>();
+  final List<IOException> exceptions = Collections.synchronizedList(new ArrayList<>());
   for (FileSummary.Section s : sections) {
 service.submit(() -> {
   InputStream ins = null;
@@ -237,8 +236,7 @@ public final class FSImageFormatPBINode {
 compressionCodec);
 loadINodeDirectorySection(ins);
   } catch (Exception e) {
-LOG.error("An exception occurred loading INodeDirectories in " +
-"parallel", e);
+LOG.error("An exception occurred loading INodeDirectories in parallel", e);
 exceptions.add(new IOException(e));
   } finally {
 latch.countDown();
@@ -424,8 +422,7 @@ public final class FSImageFormatPBINode {
   long expectedInodes = 0;
   CountDownLatch latch = new CountDownLatch(sections.size());
   AtomicInteger totalLoaded = new AtomicInteger(0);
-  final CopyOnWriteArrayList<IOException> exceptions =
-  new CopyOnWriteArrayList<>();
+  final List<IOException> exceptions = Collections.synchronizedList(new ArrayList<>());
 
   for (int i=0; i < sections.size(); i++) {
 FileSummary.Section s = sections.get(i);
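A small, self-contained sketch of the concurrency trade-off in the patch above: exceptions are added only on failure, so a synchronized ArrayList is enough, whereas CopyOnWriteArrayList copies its backing array on every add. The section count and failure simulation below are illustrative, not the FSImage loader's actual logic.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ExceptionCollectionSketch {
  public static void main(String[] args) throws InterruptedException {
    int sections = 8;
    ExecutorService service = Executors.newFixedThreadPool(4);
    CountDownLatch latch = new CountDownLatch(sections);
    // Thread-safe list shared by all loader tasks; adds simply lock,
    // with no per-element array copy as in CopyOnWriteArrayList.
    final List<IOException> exceptions =
        Collections.synchronizedList(new ArrayList<>());
    for (int i = 0; i < sections; i++) {
      final int section = i;
      service.submit(() -> {
        try {
          if (section == 3) {     // simulate one failing sub-section
            throw new IOException("failed loading sub-section " + section);
          }
        } catch (IOException e) {
          exceptions.add(e);
        } finally {
          latch.countDown();
        }
      });
    }
    latch.await();
    service.shutdown();
    System.out.println("failures collected: " + exceptions.size());
  }
}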


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (442a5fb285a -> 04f3573f6ad)

2023-01-18 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 442a5fb285a YARN-11320. [Federation] Add getSchedulerInfo REST APIs 
for Router. (#5217)
 add 04f3573f6ad HDFS-16891 Avoid the overhead of copy-on-write exception 
list while loading inodes sub sections in parallel (#5300)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hdfs/server/namenode/FSImageFormatPBINode.java | 11 ---
 1 file changed, 4 insertions(+), 7 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated (8973a596467 -> 46820a7c0ae)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 8973a596467 MAPREDUCE-7375 JobSubmissionFiles don't set right 
permission after mkdirs (#4237)
 add 46820a7c0ae HDFS-16887 Log start and end of phase/step in startup 
progress (#5292)

No new revisions were added by this update.

Summary of changes:
 .../hdfs/server/namenode/startupprogress/PhaseTracking.java  | 12 
 .../server/namenode/startupprogress/StartupProgress.java | 10 ++
 .../hadoop/hdfs/server/namenode/startupprogress/Step.java| 11 +++
 .../hdfs/server/namenode/startupprogress/StepTracking.java   | 11 +++
 4 files changed, 44 insertions(+)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HDFS-16887 Log start and end of phase/step in startup progress (#5292)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new c5cf845d785 HDFS-16887 Log start and end of phase/step in startup 
progress (#5292)
c5cf845d785 is described below

commit c5cf845d7851f56d1b9763f5951f00fcbe6f2295
Author: Viraj Jasani 
AuthorDate: Thu Jan 12 14:26:52 2023 -0800

HDFS-16887 Log start and end of phase/step in startup progress (#5292)

Signed-off-by: Chris Nauroth 
(cherry picked from commit 1263e024b9744574bf4042b2f479a749c340fbcd)
---
 .../hdfs/server/namenode/startupprogress/PhaseTracking.java  | 12 
 .../server/namenode/startupprogress/StartupProgress.java | 10 ++
 .../hadoop/hdfs/server/namenode/startupprogress/Step.java| 11 +++
 .../hdfs/server/namenode/startupprogress/StepTracking.java   | 11 +++
 4 files changed, 44 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java
index 3f1d9030297..b01a4c2845f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/PhaseTracking.java
@@ -20,6 +20,7 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
@@ -43,4 +44,15 @@ final class PhaseTracking extends AbstractTracking {
 }
 return clone;
   }
+
+  @Override
+  public String toString() {
+return new ToStringBuilder(this)
+.append("file", file)
+.append("size", size)
+.append("steps", steps)
+.append("beginTime", beginTime)
+.append("endTime", endTime)
+.toString();
+  }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
index 6249a84e7f9..0ca338b34b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
@@ -24,6 +24,9 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
@@ -48,6 +51,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public class StartupProgress {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(StartupProgress.class);
+
   // package-private for access by StartupProgressView
   final Map phases =
 new ConcurrentHashMap();
@@ -81,6 +87,7 @@ public class StartupProgress {
 if (!isComplete()) {
   phases.get(phase).beginTime = monotonicNow();
 }
+LOG.debug("Beginning of the phase: {}", phase);
   }
 
   /**
@@ -94,6 +101,7 @@ public class StartupProgress {
 if (!isComplete(phase)) {
   lazyInitStep(phase, step).beginTime = monotonicNow();
 }
+LOG.debug("Beginning of the step. Phase: {}, Step: {}", phase, step);
   }
 
   /**
@@ -105,6 +113,7 @@ public class StartupProgress {
 if (!isComplete()) {
   phases.get(phase).endTime = monotonicNow();
 }
+LOG.debug("End of the phase: {}", phase);
   }
 
   /**
@@ -118,6 +127,7 @@ public class StartupProgress {
 if (!isComplete(phase)) {
   lazyInitStep(phase, step).endTime = monotonicNow();
 }
+LOG.debug("End of the step. Phase: {}, Step: {}", phase, step);
   }
 
   /**
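A minimal sketch of the tracing added above: each phase and step transition is logged at DEBUG with parameterized SLF4J calls, so enabling DEBUG for the startup-progress classes traces NameNode startup from the log alone. Phase and step are simplified to Strings here; the real code also records monotonic begin/end times.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StartupTraceSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(StartupTraceSketch.class);

  public void beginPhase(String phase) {
    // ... record the phase begin time here ...
    LOG.debug("Beginning of the phase: {}", phase);
  }

  public void endStep(String phase, String step) {
    // ... record the step end time here ...
    LOG.debug("End of the step. Phase: {}, Step: {}", phase, step);
  }
}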
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java
index 0baf99d994e..5dee13d2a5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/Step.java
@@ -21,6 +21,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.lang3.builder.CompareToBuilder

[hadoop] branch trunk updated (36bf54aba0f -> 1263e024b97)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 36bf54aba0f MAPREDUCE-7375 JobSubmissionFiles don't set right 
permission after mkdirs (#4237)
 add 1263e024b97 HDFS-16887 Log start and end of phase/step in startup 
progress (#5292)

No new revisions were added by this update.

Summary of changes:
 .../hdfs/server/namenode/startupprogress/PhaseTracking.java  | 12 
 .../server/namenode/startupprogress/StartupProgress.java | 10 ++
 .../hadoop/hdfs/server/namenode/startupprogress/Step.java| 11 +++
 .../hdfs/server/namenode/startupprogress/StepTracking.java   | 11 +++
 4 files changed, 44 insertions(+)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: MAPREDUCE-7375 JobSubmissionFiles don't set right permission after mkdirs (#4237)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new eef2fdcc29e MAPREDUCE-7375 JobSubmissionFiles don't set right 
permission after mkdirs (#4237)
eef2fdcc29e is described below

commit eef2fdcc29e949a8001bc075219e68de5d4832bd
Author: skysiders <64545691+skysid...@users.noreply.github.com>
AuthorDate: Fri Jan 13 05:48:29 2023 +0800

MAPREDUCE-7375 JobSubmissionFiles don't set right permission after mkdirs 
(#4237)

Signed-off-by: Chris Nauroth 
(cherry picked from commit 36bf54aba0fefa0f3e94d94f836ab054d31ec5c9)
---
 .../hadoop/mapreduce/JobSubmissionFiles.java   |  2 +-
 .../hadoop/mapreduce/TestJobSubmissionFiles.java   | 25 ++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
index f6e66db2369..fffcb896091 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java
@@ -159,7 +159,7 @@ public class JobSubmissionFiles {
 fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
   }
 } catch (FileNotFoundException e) {
-  fs.mkdirs(stagingArea, new FsPermission(JOB_DIR_PERMISSION));
+  FileSystem.mkdirs(fs, stagingArea, new FsPermission(JOB_DIR_PERMISSION));
 }
 return stagingArea;
   }
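A hedged sketch of why the line above switches to the static helper: the instance call fs.mkdirs(path, perm) combines the requested mode with the configured umask, while the static FileSystem.mkdirs(fs, path, perm) creates the directory and then sets the permission explicitly, so the staging directory really ends up 700. The local filesystem and path below are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class StagingDirPermissionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path stagingArea = new Path("/tmp/staging-sketch");
    FsPermission jobDirPermission = new FsPermission((short) 0700);

    // Requested mode is filtered through fs.permissions.umask-mode.
    fs.mkdirs(stagingArea, jobDirPermission);

    // Creates the directory, then calls setPermission, so 700 sticks.
    FileSystem.mkdirs(fs, stagingArea, jobDirPermission);

    System.out.println(fs.getFileStatus(stagingArea).getPermission());
    fs.delete(stagingArea, true);   // remove the illustration directory
  }
}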
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobSubmissionFiles.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobSubmissionFiles.java
index ab3f7a0a937..6e9c80813fc 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobSubmissionFiles.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobSubmissionFiles.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -33,6 +34,8 @@ import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 /**
  * Tests for JobSubmissionFiles Utility class.
  */
@@ -139,4 +142,26 @@ public class TestJobSubmissionFiles {
 assertEquals(stagingPath,
 JobSubmissionFiles.getStagingDir(cluster, conf, user));
   }
+
+  @Test
+  public void testDirPermission() throws Exception {
+Cluster cluster = mock(Cluster.class);
+HdfsConfiguration conf = new HdfsConfiguration();
+conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "700");
+MiniDFSCluster dfsCluster = null;
+try {
+  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+  FileSystem fs = dfsCluster.getFileSystem();
+  UserGroupInformation user = UserGroupInformation
+  .createUserForTesting(USER_1_SHORT_NAME, GROUP_NAMES);
+  Path stagingPath = new Path(fs.getUri().toString() + "/testDirPermission");
+  when(cluster.getStagingAreaDir()).thenReturn(stagingPath);
+  Path res = JobSubmissionFiles.getStagingDir(cluster, conf, user);
+assertEquals(new FsPermission(0700), fs.getFileStatus(res).getPermission());
+} finally {
+  if (dfsCluster != null) {
+dfsCluster.shutdown();
+  }
+}
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated (04984e68540 -> 8973a596467)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 04984e68540 HADOOP-18591. Fix a typo in Trash (#5291)
 add 8973a596467 MAPREDUCE-7375 JobSubmissionFiles don't set right 
permission after mkdirs (#4237)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/mapreduce/JobSubmissionFiles.java   |  2 +-
 .../hadoop/mapreduce/TestJobSubmissionFiles.java   | 25 ++
 2 files changed, 26 insertions(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (a90e424d9ff -> 36bf54aba0f)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from a90e424d9ff HADOOP-18591. Fix a typo in Trash (#5291)
 add 36bf54aba0f MAPREDUCE-7375 JobSubmissionFiles don't set right 
permission after mkdirs (#4237)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/mapreduce/JobSubmissionFiles.java   |  2 +-
 .../hadoop/mapreduce/TestJobSubmissionFiles.java   | 25 ++
 2 files changed, 26 insertions(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated (a840c2e11c1 -> 04984e68540)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from a840c2e11c1 HDFS-16872. Fix log throttling by declaring 
LogThrottlingHelper as static members (#5246)
 add 04984e68540 HADOOP-18591. Fix a typo in Trash (#5291)

No new revisions were added by this update.

Summary of changes:
 .../hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-18591. Fix a typo in Trash (#5291)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new f5e9901e6d0 HADOOP-18591. Fix a typo in Trash (#5291)
f5e9901e6d0 is described below

commit f5e9901e6d03a6b9f3a1f8c1ed996271e23856fd
Author: huangxiaoping <35296098+huangxiaopin...@users.noreply.github.com>
AuthorDate: Fri Jan 13 05:21:21 2023 +0800

HADOOP-18591. Fix a typo in Trash (#5291)

Signed-off-by: Tao Li 
Signed-off-by: Chris Nauroth 
(cherry picked from commit a90e424d9ff30a0510e7a29adc01ebdc7754a20e)
---
 .../hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 5c5fa0237ea..73749dd2549 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -69,7 +69,7 @@ public class Trash extends Configured {
* Hence we get the file system of the fully-qualified resolved-path and
* then move the path p to the trashbin in that volume,
* @param fs - the filesystem of path p
-   * @param p - the  path being deleted - to be moved to trasg
+   * @param p - the path being deleted - to be moved to trash
* @param conf - configuration
* @return false if the item is already in the trash or trash is disabled
* @throws IOException on error


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-18591. Fix a typo in Trash (#5291)

2023-01-12 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a90e424d9ff HADOOP-18591. Fix a typo in Trash (#5291)
a90e424d9ff is described below

commit a90e424d9ff30a0510e7a29adc01ebdc7754a20e
Author: huangxiaoping <35296098+huangxiaopin...@users.noreply.github.com>
AuthorDate: Fri Jan 13 05:21:21 2023 +0800

HADOOP-18591. Fix a typo in Trash (#5291)

Signed-off-by: Tao Li 
Signed-off-by: Chris Nauroth 
---
 .../hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 5c5fa0237ea..73749dd2549 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -69,7 +69,7 @@ public class Trash extends Configured {
* Hence we get the file system of the fully-qualified resolved-path and
* then move the path p to the trashbin in that volume,
* @param fs - the filesystem of path p
-   * @param p - the  path being deleted - to be moved to trasg
+   * @param p - the path being deleted - to be moved to trash
* @param conf - configuration
* @return false if the item is already in the trash or trash is disabled
* @throws IOException on error


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated (fbb9734dd42 -> c760188b881)

2023-01-09 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from fbb9734dd42 HADOOP-18586. Update the year to 2023. (#5265). 
Contributed by Ayush Saxena.
 add c760188b881 HADOOP-18590. Publish SBOM artifacts (#5281)

No new revisions were added by this update.

Summary of changes:
 pom.xml | 18 ++
 1 file changed, 18 insertions(+)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-18590. Publish SBOM artifacts (#5281)

2023-01-09 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new b6fd3223078 HADOOP-18590. Publish SBOM artifacts (#5281)
b6fd3223078 is described below

commit b6fd32230780b15bd4b1fc8e1d32867de8083da3
Author: Dongjoon Hyun 
AuthorDate: Mon Jan 9 16:41:06 2023 -0800

HADOOP-18590. Publish SBOM artifacts (#5281)

Signed-off-by: Chris Nauroth 
(cherry picked from commit 6f99558c2eda5f82e14919789000add91dc8673d)
---
 pom.xml | 18 ++
 1 file changed, 18 insertions(+)

diff --git a/pom.xml b/pom.xml
index 324b7a28dc1..a56adec35d3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -116,6 +116,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
https://maven.apache.org/x
 1.4.3
 4.2.2
 4.2.0
+    <cyclonedx.version>2.7.3</cyclonedx.version>
 
 bash
 
@@ -379,6 +380,19 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
https://maven.apache.org/x
 
   
 
+        <plugin>
+          <groupId>org.cyclonedx</groupId>
+          <artifactId>cyclonedx-maven-plugin</artifactId>
+          <version>${cyclonedx.version}</version>
+          <executions>
+            <execution>
+              <phase>package</phase>
+              <goals>
+                <goal>makeBom</goal>
+              </goals>
+            </execution>
+          </executions>
+        </plugin>
   
 
 
@@ -486,6 +500,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
https://maven.apache.org/x
 com.github.spotbugs
 spotbugs-maven-plugin
   
+      <plugin>
+        <groupId>org.cyclonedx</groupId>
+        <artifactId>cyclonedx-maven-plugin</artifactId>
+      </plugin>
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (b56d483258a -> 6f99558c2ed)

2023-01-09 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from b56d483258a HDFS-16876: Changes cleanup of shared RouterStateIdContext 
to be driven by namenodeResolver data. (#5282)
 add 6f99558c2ed HADOOP-18590. Publish SBOM artifacts (#5281)

No new revisions were added by this update.

Summary of changes:
 pom.xml | 18 ++
 1 file changed, 18 insertions(+)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-18587: upgrade to jettison 1.5.3 due to cve (#5270)

2023-01-06 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new f856611121b HADOOP-18587: upgrade to jettison 1.5.3 due to cve (#5270)
f856611121b is described below

commit f856611121b6f51d3fc1b1857ebd1dae235427fc
Author: PJ Fanning 
AuthorDate: Sat Jan 7 00:35:50 2023 +0100

HADOOP-18587: upgrade to jettison 1.5.3 due to cve (#5270)

Signed-off-by: Chris Nauroth 
(cherry picked from commit b9eb760ed238a2ae57a0dbb8b7e95f54589325f2)
---
 LICENSE-binary | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index f88b2b96f0b..7fcf8f3d983 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -345,7 +345,7 @@ org.apache.kerby:kerby-util:1.0.1
 org.apache.kerby:kerby-xdr:1.0.1
 org.apache.kerby:token-provider:1.0.1
 org.apache.yetus:audience-annotations:0.5.0
-org.codehaus.jettison:jettison:1.5.1
+org.codehaus.jettison:jettison:1.5.3
 org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
 org.eclipse.jetty:jetty-http:9.4.48.v20220622
 org.eclipse.jetty:jetty-io:9.4.48.v20220622
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2cdcfcf5598..e08f1bd8564 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1505,7 +1505,7 @@
   
         <groupId>org.codehaus.jettison</groupId>
         <artifactId>jettison</artifactId>
-        <version>1.5.1</version>
+        <version>1.5.3</version>
 
   
 stax


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-18587: upgrade to jettison 1.5.3 due to cve (#5270)

2023-01-06 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b9eb760ed23 HADOOP-18587: upgrade to jettison 1.5.3 due to cve (#5270)
b9eb760ed23 is described below

commit b9eb760ed238a2ae57a0dbb8b7e95f54589325f2
Author: PJ Fanning 
AuthorDate: Sat Jan 7 00:35:50 2023 +0100

HADOOP-18587: upgrade to jettison 1.5.3 due to cve (#5270)

Signed-off-by: Chris Nauroth 
---
 LICENSE-binary | 2 +-
 hadoop-project/pom.xml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 142f751a309..876b88f82a7 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -343,7 +343,7 @@ org.apache.kerby:token-provider:2.0.2
 org.apache.solr:solr-solrj:8.8.2
 org.apache.yetus:audience-annotations:0.5.0
 org.apache.zookeeper:zookeeper:3.6.3
-org.codehaus.jettison:jettison:1.5.1
+org.codehaus.jettison:jettison:1.5.3
 org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
 org.eclipse.jetty:jetty-http:9.4.48.v20220622
 org.eclipse.jetty:jetty-io:9.4.48.v20220622
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 8c3c790a38d..c0801330bc7 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1529,7 +1529,7 @@
   
         <groupId>org.codehaus.jettison</groupId>
         <artifactId>jettison</artifactId>
-        <version>1.5.1</version>
+        <version>1.5.3</version>
 
   
 stax


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated (766da519a14 -> 0ac7acaabe0)

2022-12-28 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 766da519a14 YARN-11392 Audit Log missing in ClientRMService (#5250). 
Contributed by Beibei Zhao.
 add 0ac7acaabe0 YARN-11388: Prevent resource leaks in TestClientRMService. 
(#5187)

No new revisions were added by this update.

Summary of changes:
 .../resourcemanager/TestClientRMService.java   | 350 ++---
 1 file changed, 160 insertions(+), 190 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: YARN-11388: Prevent resource leaks in TestClientRMService. (#5187)

2022-12-28 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 1f270d8a5e9 YARN-11388: Prevent resource leaks in TestClientRMService. 
(#5187)
1f270d8a5e9 is described below

commit 1f270d8a5e92f5f786125e8390bcd22156b0d331
Author: Chris Nauroth 
AuthorDate: Wed Dec 28 11:00:27 2022 -0800

YARN-11388: Prevent resource leaks in TestClientRMService. (#5187)

Signed-off-by: Shilun Fan 
(cherry picked from commit 6b67373d10717602da18ae701e8bc7f20dbb54d6)
---
 .../resourcemanager/TestClientRMService.java   | 350 ++---
 1 file changed, 160 insertions(+), 190 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 8307f88c556..418efbe2f84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -59,9 +60,11 @@ import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
@@ -202,12 +205,17 @@ public class TestClientRMService {
   .getRecordFactory(null);
 
   private String appType = "MockApp";
-  
+
   private final static String QUEUE_1 = "Q-1";
   private final static String QUEUE_2 = "Q-2";
   private final static String APPLICATION_TAG_SC_PREPROCESSOR ="mytag:foo";
   private File resourceTypesFile = null;
 
+  private Configuration conf;
+  private ResourceManager resourceManager;
+  private YarnRPC rpc;
+  private ApplicationClientProtocol client;
+
   @Test
   public void testGetDecommissioningClusterNodes() throws Exception {
 MockRM rm = new MockRM() {
@@ -218,6 +226,7 @@ public class TestClientRMService {
 this.getRMContext().getRMDelegationTokenSecretManager());
   };
 };
+resourceManager = rm;
 rm.start();
 
 int nodeMemory = 1024;
@@ -230,13 +239,12 @@ public class TestClientRMService {
 rm.waitForState(nm1.getNodeId(), NodeState.DECOMMISSIONING);
 
 // Create a client.
-Configuration conf = new Configuration();
-YarnRPC rpc = YarnRPC.create(conf);
+conf = new Configuration();
+rpc = YarnRPC.create(conf);
 InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
 LOG.info("Connecting to ResourceManager at " + rmAddress);
-ApplicationClientProtocol client =
-(ApplicationClientProtocol) rpc
-.getProxy(ApplicationClientProtocol.class, rmAddress, conf);
+client = (ApplicationClientProtocol) rpc.getProxy(
+ApplicationClientProtocol.class, rmAddress, conf);
 
 // Make call
 List nodeReports = client.getClusterNodes(
@@ -247,9 +255,6 @@ public class TestClientRMService {
 NodeReport nr = nodeReports.iterator().next();
 Assert.assertEquals(decommissioningTimeout, nr.getDecommissioningTimeout());
 Assert.assertNull(nr.getNodeUpdateType());
-
-rpc.stopProxy(client, conf);
-rm.close();
   }
 
   @Test
@@ -261,6 +266,7 @@ public class TestClientRMService {
   this.getRMContext().getRMDelegationTokenSecretManager());
   };
 };
+resourceManager = rm;
 rm.start();
 RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
 labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", 
"y"));
@@ -272,7 +278,7 @@ public class TestClientRMService {
 labelsMgr.replaceLabelsOnNode(map);
 rm.sendNodeStarted(node);
 node.nodeHeartbeat(true);
-
+
 // Add and lose a node with label = y
 MockNM l

[hadoop] branch trunk updated (9668a85d40a -> 6b67373d107)

2022-12-28 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 9668a85d40a YARN-11392 Audit Log missing in ClientRMService (#5250). 
Contributed by Beibei Zhao.
 add 6b67373d107 YARN-11388: Prevent resource leaks in TestClientRMService. 
(#5187)

No new revisions were added by this update.

Summary of changes:
 .../resourcemanager/TestClientRMService.java   | 350 ++---
 1 file changed, 160 insertions(+), 190 deletions(-)
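A hedged sketch of the cleanup pattern behind this change (the @After body is inferred; the diff quoted above is truncated): the configuration, RM, RPC factory, and client proxy move into fields so a single teardown can release them even when an assertion fails mid-test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.junit.After;

public class TeardownSketch {
  private Configuration conf;
  private ResourceManager resourceManager;
  private YarnRPC rpc;
  private ApplicationClientProtocol client;

  @After
  public void tearDown() throws Exception {
    if (rpc != null && client != null) {
      rpc.stopProxy(client, conf);    // close the RPC proxy a test opened
    }
    if (resourceManager != null) {
      resourceManager.close();        // stop the RM a test started
    }
  }
}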


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated (362544a631c -> 766da519a14)

2022-12-27 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 362544a631c YARN-11395. RM UI, RMAttemptBlock can not render 
FINAL_SAVING. Contributed by Bence Kosztolnik
 add 766da519a14 YARN-11392 Audit Log missing in ClientRMService (#5250). 
Contributed by Beibei Zhao.

No new revisions were added by this update.

Summary of changes:
 .../server/resourcemanager/ClientRMService.java| 47 +-
 .../yarn/server/resourcemanager/RMAuditLogger.java |  1 +
 2 files changed, 11 insertions(+), 37 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: YARN-11392 Audit Log missing in ClientRMService (#5250). Contributed by Beibei Zhao.

2022-12-27 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 290dc7817c0 YARN-11392 Audit Log missing in ClientRMService (#5250). 
Contributed by Beibei Zhao.
290dc7817c0 is described below

commit 290dc7817c0ce6cf3015f829787bfab08c56303c
Author: curie71 <39853223+curi...@users.noreply.github.com>
AuthorDate: Wed Dec 28 07:58:53 2022 +0800

YARN-11392 Audit Log missing in ClientRMService (#5250). Contributed by 
Beibei Zhao.

Signed-off-by: Chris Nauroth 
(cherry picked from commit 9668a85d40a6a98514a24d5f25ab757501fe3423)
---
 .../server/resourcemanager/ClientRMService.java| 47 +-
 .../yarn/server/resourcemanager/RMAuditLogger.java |  1 +
 2 files changed, 11 insertions(+), 37 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index c725c2c0b36..7861a6b3e5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -406,22 +406,11 @@ public class ClientRMService extends AbstractService 
implements
   throw new ApplicationNotFoundException("Invalid application id: null");
 }
 
-UserGroupInformation callerUGI;
-try {
-  callerUGI = UserGroupInformation.getCurrentUser();
-} catch (IOException ie) {
-  LOG.info("Error getting UGI ", ie);
-  throw RPCUtil.getRemoteException(ie);
-}
+UserGroupInformation callerUGI = getCallerUgi(applicationId,
+AuditConstants.GET_APP_REPORT);
 
-RMApp application = this.rmContext.getRMApps().get(applicationId);
-if (application == null) {
-  // If the RM doesn't have the application, throw
-  // ApplicationNotFoundException and let client to handle.
-  throw new ApplicationNotFoundException("Application with id '"
-  + applicationId + "' doesn't exist in RM. Please check "
-  + "that the job submission was successful.");
-}
+RMApp application = verifyUserAccessForRMApp(applicationId, callerUGI,
+AuditConstants.GET_APP_REPORT, ApplicationAccessType.VIEW_APP, false);
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -881,13 +870,8 @@ public class ClientRMService extends AbstractService 
implements
   @Override
   public GetApplicationsResponse getApplications(GetApplicationsRequest 
request)
   throws YarnException {
-UserGroupInformation callerUGI;
-try {
-  callerUGI = UserGroupInformation.getCurrentUser();
-} catch (IOException ie) {
-  LOG.info("Error getting UGI ", ie);
-  throw RPCUtil.getRemoteException(ie);
-}
+UserGroupInformation callerUGI = getCallerUgi(null,
+AuditConstants.GET_APPLICATIONS_REQUEST);
 
 Set applicationTypes = getLowerCasedAppTypes(request);
 EnumSet applicationStates =
@@ -1028,13 +1012,8 @@ public class ClientRMService extends AbstractService 
implements
   @Override
   public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
   throws YarnException {
-UserGroupInformation callerUGI;
-try {
-  callerUGI = UserGroupInformation.getCurrentUser();
-} catch (IOException ie) {
-  LOG.info("Error getting UGI ", ie);
-  throw RPCUtil.getRemoteException(ie);
-}
+UserGroupInformation callerUGI = getCallerUgi(null,
+AuditConstants.GET_QUEUE_INFO_REQUEST);
 
 GetQueueInfoResponse response =
   recordFactory.newRecordInstance(GetQueueInfoResponse.class);
@@ -1700,16 +1679,10 @@ public class ClientRMService extends AbstractService 
implements
   SignalContainerRequest request) throws YarnException, IOException {
 ContainerId containerId = request.getContainerId();
 
-UserGroupInformation callerUGI;
-try {
-  callerUGI = UserGroupInformation.getCurrentUser();
-} catch (IOException ie) {
-  LOG.info("Error getting UGI ", ie);
-  throw RPCUtil.getRemoteException(ie);
-}
-
 ApplicationId applicationId = containerId.getApplicationAttemptId().
 getApplicationId();
+UserGroupInformation callerUGI = getCallerUgi(applicationId,
+AuditConstants.SIGNAL_CONTAINER);
 RMApp app

[hadoop] branch trunk updated (d25c1be5173 -> 9668a85d40a)

2022-12-27 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from d25c1be5173 HDFS-16861. RBF. Truncate API always fails when dirs use 
AllResolver oder on Router   (#5184)
 add 9668a85d40a YARN-11392 Audit Log missing in ClientRMService (#5250). 
Contributed by Beibei Zhao.

No new revisions were added by this update.

Summary of changes:
 .../server/resourcemanager/ClientRMService.java| 47 +-
 .../yarn/server/resourcemanager/RMAuditLogger.java |  1 +
 2 files changed, 11 insertions(+), 37 deletions(-)
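An illustrative sketch (not ClientRMService's actual helper) of the pattern this change introduces: resolve the caller UGI in one place, record an RM audit failure on error, and rethrow, instead of repeating the same try/catch in every RPC handler. The audit line below is a stand-in for the real RMAuditLogger call.

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;

public class CallerUgiSketch {
  static UserGroupInformation getCallerUgi(String operation) throws YarnException {
    try {
      return UserGroupInformation.getCurrentUser();
    } catch (IOException ie) {
      // Stand-in for RMAuditLogger.logFailure(...): the failed operation is
      // recorded instead of being silently wrapped and rethrown.
      System.err.println("AUDIT FAILURE: op=" + operation + " cause=" + ie);
      throw RPCUtil.getRemoteException(ie);
    }
  }
}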


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-11390. TestResourceTrackerService.testNodeRemovalNormally: Shutdown nodes should be 0 now expected: <1> but was: <0> (#5190)

2022-12-08 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new b9033f95f03 YARN-11390. 
TestResourceTrackerService.testNodeRemovalNormally: Shutdown nodes should be 0 
now expected: <1> but was: <0> (#5190)
b9033f95f03 is described below

commit b9033f95f03bcabfc41be991ed47f3fc2b164ac5
Author: K0K0V0K <109747532+k0k0...@users.noreply.github.com>
AuthorDate: Thu Dec 8 18:52:19 2022 +0100

YARN-11390. TestResourceTrackerService.testNodeRemovalNormally: Shutdown 
nodes should be 0 now expected: <1> but was: <0> (#5190)

Reviewed-by: Peter Szucs
Signed-off-by: Chris Nauroth 
(cherry picked from commit ee7d1787cd2cdfea936425dcf88e750827ff5c1b)
(cherry picked from commit 8b748c1cb821d1a547593aba89fc6ec3c34eafbf)
---
 .../TestResourceTrackerService.java| 63 +-
 1 file changed, 39 insertions(+), 24 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 94d440a6d82..4528f541b3c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.nodelabels.NodeAttributeStore;
 import org.apache.hadoop.yarn.nodelabels.NodeLabelUtil;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
@@ -44,11 +45,14 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.HashSet;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 import javax.xml.parsers.DocumentBuilderFactory;
 import javax.xml.transform.Transformer;
 import javax.xml.transform.TransformerFactory;
@@ -2257,8 +2261,7 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
 }
 
 //Test decommed/ing node that transitions to untracked,timer should remove
-testNodeRemovalUtilDecomToUntracked(rmContext, conf, nm1, nm2, nm3,
-maxThreadSleeptime, doGraceful);
+testNodeRemovalUtilDecomToUntracked(rmContext, conf, nm1, nm2, nm3, doGraceful);
 rm.stop();
   }
 
@@ -2266,41 +2269,41 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
   // max allowed length.
   private void testNodeRemovalUtilDecomToUntracked(
   RMContext rmContext, Configuration conf,
-  MockNM nm1, MockNM nm2, MockNM nm3,
-  long maxThreadSleeptime, boolean doGraceful) throws Exception {
+  MockNM nm1, MockNM nm2, MockNM nm3, boolean doGraceful
+  ) throws Exception {
 ClusterMetrics metrics = ClusterMetrics.getMetrics();
 String ip = NetUtils.normalizeHostName("localhost");
-CountDownLatch latch = new CountDownLatch(1);
 writeToHostsFile("host1", ip, "host2");
 writeToHostsFile(excludeHostFile, "host2");
 refreshNodesOption(doGraceful, conf);
 nm1.nodeHeartbeat(true);
 //nm2.nodeHeartbeat(true);
 nm3.nodeHeartbeat(true);
-latch.await(maxThreadSleeptime, TimeUnit.MILLISECONDS);
-RMNode rmNode = doGraceful ? rmContext.getRMNodes().get(nm2.getNodeId()) :
- rmContext.getInactiveRMNodes().get(nm2.getNodeId());
-Assert.assertNotEquals("Timer for this node was not canceled!",
-rmNode, null);
-Assert.assertTrue("Node should be DECOMMISSIONED or DECOMMISSIONING",
-(rmNode.getState() == NodeState.DECOMMISSIONED) ||
-(rmNode.getState() == NodeState.DECOMMISSIONING));
+Supplier<RMNode> nodeSupplier = doGraceful
+? () -> rmContext.getRMNodes().get(nm2.getNodeId())
+: () -> rmContext.getInactiveRMNodes().get(nm2.getNodeId());
+pollingAssert(() -> nodeSupplier.get() != null,
+"Timer for this node was not canceled!");
+final List expectedStates = Arrays.asList(
+NodeStat

[hadoop] branch branch-3.3 updated: YARN-11390. TestResourceTrackerService.testNodeRemovalNormally: Shutdown nodes should be 0 now expected: <1> but was: <0> (#5190)

2022-12-08 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 8b748c1cb82 YARN-11390. 
TestResourceTrackerService.testNodeRemovalNormally: Shutdown nodes should be 0 
now expected: <1> but was: <0> (#5190)
8b748c1cb82 is described below

commit 8b748c1cb821d1a547593aba89fc6ec3c34eafbf
Author: K0K0V0K <109747532+k0k0...@users.noreply.github.com>
AuthorDate: Thu Dec 8 18:52:19 2022 +0100

YARN-11390. TestResourceTrackerService.testNodeRemovalNormally: Shutdown 
nodes should be 0 now expected: <1> but was: <0> (#5190)

Reviewed-by: Peter Szucs
Signed-off-by: Chris Nauroth 
(cherry picked from commit ee7d1787cd2cdfea936425dcf88e750827ff5c1b)
---
 .../TestResourceTrackerService.java| 63 +-
 1 file changed, 39 insertions(+), 24 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 0ef82436c30..5817360a0c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.net.ServerSocketUtil;
@@ -54,13 +55,16 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.HashSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
 import javax.xml.parsers.DocumentBuilderFactory;
 import javax.xml.transform.Transformer;
 import javax.xml.transform.TransformerFactory;
@@ -2284,8 +2288,7 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
 }
 
 //Test decommed/ing node that transitions to untracked,timer should remove
-testNodeRemovalUtilDecomToUntracked(rmContext, conf, nm1, nm2, nm3,
-maxThreadSleeptime, doGraceful);
+testNodeRemovalUtilDecomToUntracked(rmContext, conf, nm1, nm2, nm3, doGraceful);
 rm.stop();
   }
 
@@ -2293,41 +2296,41 @@ public class TestResourceTrackerService extends 
NodeLabelTestBase {
   // max allowed length.
   private void testNodeRemovalUtilDecomToUntracked(
   RMContext rmContext, Configuration conf,
-  MockNM nm1, MockNM nm2, MockNM nm3,
-  long maxThreadSleeptime, boolean doGraceful) throws Exception {
+  MockNM nm1, MockNM nm2, MockNM nm3, boolean doGraceful
+  ) throws Exception {
 ClusterMetrics metrics = ClusterMetrics.getMetrics();
 String ip = NetUtils.normalizeHostName("localhost");
-CountDownLatch latch = new CountDownLatch(1);
 writeToHostsFile("host1", ip, "host2");
 writeToHostsFile(excludeHostFile, "host2");
 refreshNodesOption(doGraceful, conf);
 nm1.nodeHeartbeat(true);
 //nm2.nodeHeartbeat(true);
 nm3.nodeHeartbeat(true);
-latch.await(maxThreadSleeptime, TimeUnit.MILLISECONDS);
-RMNode rmNode = doGraceful ? rmContext.getRMNodes().get(nm2.getNodeId()) :
- rmContext.getInactiveRMNodes().get(nm2.getNodeId());
-Assert.assertNotEquals("Timer for this node was not canceled!",
-rmNode, null);
-Assert.assertTrue("Node should be DECOMMISSIONED or DECOMMISSIONING",
-(rmNode.getState() == NodeState.DECOMMISSIONED) ||
-(rmNode.getState() == NodeState.DECOMMISSIONING));
+Supplier nodeSupplier = doGraceful
+? () -> rmContext.getRMNodes().get(nm2.getNodeId())
+: () -> rmContext.getInactiveRMNodes().get(nm2.getNodeId());
+pollingAssert(() -> nodeSupplier.get() != null,
+"Timer for this node was not canceled!");
+final List expectedStates = Arrays.asList(
+NodeState.DECOMMISSIONED,
+NodeState.DECOMMISSIONING
+);
+  
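The patch replaces the fixed latch.await(...) wait with a polling assertion that re-checks the condition until it passes or times out. A minimal sketch of that pattern, assuming GenericTestUtils.waitFor(Supplier<Boolean>, checkEveryMillis, waitForMillis) is available on the test classpath; the helper name and timeouts below are illustrative, not the exact upstream code:

    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.junit.Assert;

    public final class PollingAssertSketch {
      // Re-check the condition every 100 ms for up to 10 s, then fail with the message.
      static void pollingAssert(Supplier<Boolean> condition, String message)
          throws InterruptedException {
        try {
          GenericTestUtils.waitFor(condition, 100, 10_000);
        } catch (TimeoutException e) {
          Assert.fail(message);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        long deadline = System.currentTimeMillis() + 500;
        // Example condition: becomes true after roughly 500 ms.
        pollingAssert(() -> System.currentTimeMillis() > deadline,
            "condition never became true");
      }
    }

Compared to a fixed sleep, the polling form returns as soon as the condition holds and only times out when it genuinely never does, which is what makes the rewritten test less flaky.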

[hadoop] branch trunk updated (0a4528cd7f9 -> ee7d1787cd2)

2022-12-08 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 0a4528cd7f9 HADOOP-18563. Misleading AWS SDK S3 timeout configuration 
comment (#5197)
 add ee7d1787cd2 YARN-11390. 
TestResourceTrackerService.testNodeRemovalNormally: Shutdown nodes should be 0 
now expected: <1> but was: <0> (#5190)

No new revisions were added by this update.

Summary of changes:
 .../TestResourceTrackerService.java| 62 +-
 1 file changed, 38 insertions(+), 24 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-site] branch asf-site updated: Update org for abmod@, cnauroth@, vinayakumarb@ and weiy@. (#41)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/hadoop-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new ea5f7d586a Update org for abmod@, cnauroth@, vinayakumarb@ and weiy@. 
(#41)
ea5f7d586a is described below

commit ea5f7d586aa37864bc62a055e45bf4a6254c78ad
Author: Chris Nauroth 
AuthorDate: Tue Nov 1 22:40:54 2022 -0700

Update org for abmod@, cnauroth@, vinayakumarb@ and weiy@. (#41)
---
 content/index.html |  2 +-
 content/who.html   | 16 
 src/who.md | 12 ++--
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/content/index.html b/content/index.html
index ea843bf80f..b3da0b61e7 100644
--- a/content/index.html
+++ b/content/index.html
@@ -3,7 +3,7 @@
 
 
   
-   
+   
 
 
 
diff --git a/content/who.html b/content/who.html
index 5501dddad4..bae533a159 100644
--- a/content/who.html
+++ b/content/who.html
@@ -298,8 +298,8 @@ order):
 
 
 cnauroth
-Chris Nauroth
-Hortonworks
+https://github.com/cnauroth;>Chris Nauroth
+Google
 
 -8
 
@@ -796,7 +796,7 @@ order):
 
 vinayakumarb
 https://people.apache.org/~vinayakumarb/;>Vinayakumar B
-Huawei
+Google
 
 +5.5
 
@@ -964,7 +964,7 @@ order):
 
 abmodi
 Abhishek Modi
-Microsoft
+Google
 
 +5.5
 
@@ -1166,8 +1166,8 @@ order):
 
 
 cnauroth
-Chris Nauroth
-Hortonworks
+https://github.com/cnauroth;>Chris Nauroth
+Google
 
 -8
 
@@ -2007,7 +2007,7 @@ order):
 
 vinayakumarb
 http://people.apache.org/~vinayakumarb;>Vinayakumar B
-Huawei
+Google
 HDFS
 +5.5
 
@@ -2070,7 +2070,7 @@ order):
 
 weiy
 Wei Yan
-Uber
+Google
 
 -8
 
diff --git a/src/who.md b/src/who.md
index 3cbe987b16..4891ebc92b 100644
--- a/src/who.md
+++ b/src/who.md
@@ -54,7 +54,7 @@ order):
   cdouglas |[Chris Douglas](http://people.apache.org/~cdouglas)
  |Microsoft  ||-8
   cliang   |Chen Liang 
  |LinkedIn   ||-8
   cmccabe  |[Colin Patrick 
McCabe](http://www.club.cc.cmu.edu/~cmccabe)  |Cloudera   
|HDFS    |-8
-  cnauroth |Chris Nauroth  
  |Hortonworks|    |-8
+  cnauroth |[Chris Nauroth](https://github.com/cnauroth)   
  |Google ||-8
   cutting  |[Doug Cutting](http://blog.lucene.com/)
  |Cloudera   ||-8
   daryn|Daryn Sharp
  |Verizon Media  ||-6
   ddas |[Devaraj Das](http://people.apache.org/~ddas)  
  |Hortonworks||-8
@@ -125,7 +125,7 @@ order):
   tucu |Alejandro Abdelnur 
  |Cloudera   ||-8
   umamahesh|[Uma Maheswara Rao 
G](https://people.apache.org/~umamahesh/umamahesh.html)   |Intel  | 
   |+5.5
   varunsaxena  |[Varun Saxena](http://people.apache.org/~varunsaxena)  
  |LinkedIn   ||+5.5
-  vinayakumarb |[Vinayakumar B](https://people.apache.org/~vinayakumarb/)  
  |Huawei ||+5.5
+  vinayakumarb |[Vinayakumar B](https://people.apache.org/~vinayakumarb/)  
  |Google ||+5.5
   vinodkv  |Vinod Kumar Vavilapalli
  |Hortonworks||-8
   vvasudev |Varun Vasudev  
  |   ||+5.5
   yhemanth |Hemanth Yamijala   
  |   ||+5.5
@@ -158,7 +158,7 @@ username|name
 
|---||---|--|
 aajisaka|Akira Ajisaka 
  |AWS ||+9
 ab  |Andrzej Bialecki  
  |Getopt  ||+1
-abmodi  |Abhishek Modi 
  |Microsoft   ||+5.5
+abmodi  |Abhishek Modi 
  |Google  ||+5.5
 acmurthy|[Arun C Murthy](http://people.apache.org/~acmurthy)   
  |Hortonworks ||-8
 adhoot  

[hadoop] branch branch-3.2 updated: YARN-11364. Docker Container to accept docker Image name with sha256 digest (#5092)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new d26fd2106fa YARN-11364. Docker Container to accept docker Image name 
with sha256 digest (#5092)
d26fd2106fa is described below

commit d26fd2106faa41d04d4b1db201f939463e570346
Author: Ashutosh Gupta 
AuthorDate: Tue Nov 1 21:44:35 2022 +

YARN-11364. Docker Container to accept docker Image name with sha256 digest 
(#5092)

Co-authored-by: Ashutosh Gupta 
Reviewed-by: slfan1989 <55643692+slfan1...@users.noreply.github.com>
Signed-off-by: Chris Nauroth 
(cherry picked from commit 83acb559817a97c14c4e3fd846dcc16ab615093e)
(cherry picked from commit 0961014262c5fffe69fcec6b9874c607e1e8ce77)
---
 .../linux/runtime/DockerLinuxContainerRuntime.java | 17 +--
 .../linux/runtime/TestDockerContainerRuntime.java  | 34 +-
 2 files changed, 35 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 0010e49aeac..4d95599becf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -213,6 +213,9 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   "|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])" +
   ":[0-9]+:[0-9]+$";
   private static final int HOST_NAME_LENGTH = 64;
+
+  private static final Pattern DOCKER_DIGEST_PATTERN = 
Pattern.compile("^sha256:[a-z0-9]{12,64}$");
+
   private static final String DEFAULT_PROCFS = "/proc";
 
   @InterfaceAudience.Private
@@ -1264,9 +1267,17 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   throw new ContainerExecutionException(
   ENV_DOCKER_CONTAINER_IMAGE + " not set!");
 }
-if (!dockerImagePattern.matcher(imageName).matches()) {
-  throw new ContainerExecutionException("Image name '" + imageName
-  + "' doesn't match docker image name pattern");
+// check if digest is part of imageName, extract and validate it.
+String digest = null;
+if (imageName.contains("@sha256")) {
+  String[] digestParts = imageName.split("@");
+  digest = digestParts[1];
+  imageName = digestParts[0];
+}
+if (!dockerImagePattern.matcher(imageName).matches() || (digest != null
+&& !DOCKER_DIGEST_PATTERN.matcher(digest).matches())) {
+  throw new ContainerExecutionException(
+  "Image name '" + imageName + "' doesn't match docker image name 
pattern");
 }
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index ed530caa36a..1aeed7e3a1b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1950,19 +1950,27 @@ public class TestDockerContainerRuntime {
 
   @Test
   public void testDockerImageNamePattern() throws Exception {
-String[] validNames =
-{ "ubuntu", "fedora/httpd:version1.0",
-"fedora/httpd:version1.0.test",
-"fedora/httpd:version1.0.TEST",
-"myregistryhost:5000/ubuntu",
-"myregistryhost:5000/fedora/httpd:version1.0",
-"myregistryhost:5000/fedora/httpd:version1.0.test",
-"myregistryhost:5000/fedora/httpd:version1.0.TEST"}

[hadoop] branch branch-3.3 updated: YARN-11364. Docker Container to accept docker Image name with sha256 digest (#5092)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 0961014262c YARN-11364. Docker Container to accept docker Image name 
with sha256 digest (#5092)
0961014262c is described below

commit 0961014262c5fffe69fcec6b9874c607e1e8ce77
Author: Ashutosh Gupta 
AuthorDate: Tue Nov 1 21:44:35 2022 +

YARN-11364. Docker Container to accept docker Image name with sha256 digest 
(#5092)

Co-authored-by: Ashutosh Gupta 
Reviewed-by: slfan1989 <55643692+slfan1...@users.noreply.github.com>
Signed-off-by: Chris Nauroth 
(cherry picked from commit 83acb559817a97c14c4e3fd846dcc16ab615093e)
---
 .../linux/runtime/DockerLinuxContainerRuntime.java | 16 --
 .../linux/runtime/TestDockerContainerRuntime.java  | 34 +-
 2 files changed, 34 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 8283e062f2e..7a2f875d5a1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -206,6 +206,8 @@ public class DockerLinuxContainerRuntime extends 
OCIContainerRuntime {
   private static final Pattern dockerImagePattern =
   Pattern.compile(DOCKER_IMAGE_PATTERN);
 
+  private static final Pattern DOCKER_DIGEST_PATTERN = 
Pattern.compile("^sha256:[a-z0-9]{12,64}$");
+
   private static final String DEFAULT_PROCFS = "/proc";
 
   @InterfaceAudience.Private
@@ -1178,9 +1180,17 @@ public class DockerLinuxContainerRuntime extends 
OCIContainerRuntime {
   throw new ContainerExecutionException(
   ENV_DOCKER_CONTAINER_IMAGE + " not set!");
 }
-if (!dockerImagePattern.matcher(imageName).matches()) {
-  throw new ContainerExecutionException("Image name '" + imageName
-  + "' doesn't match docker image name pattern");
+// check if digest is part of imageName, extract and validate it.
+String digest = null;
+if (imageName.contains("@sha256")) {
+  String[] digestParts = imageName.split("@");
+  digest = digestParts[1];
+  imageName = digestParts[0];
+}
+if (!dockerImagePattern.matcher(imageName).matches() || (digest != null
+&& !DOCKER_DIGEST_PATTERN.matcher(digest).matches())) {
+  throw new ContainerExecutionException(
+  "Image name '" + imageName + "' doesn't match docker image name 
pattern");
 }
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index bb1abf51df0..51471766f11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -2034,19 +2034,27 @@ public class TestDockerContainerRuntime {
 
   @Test
   public void testDockerImageNamePattern() throws Exception {
-String[] validNames =
-{ "ubuntu", "fedora/httpd:version1.0",
-"fedora/httpd:version1.0.test",
-"fedora/httpd:version1.0.TEST",
-"myregistryhost:5000/ubuntu",
-"myregistryhost:5000/fedora/httpd:version1.0",
-"myregistryhost:5000/fedora/httpd:version1.0.test",
-"myregistryhost:5000/fedora/httpd:version1.0.TEST"};
-
-String[] invalidNames = { "Ubuntu", "ubuntu || fedora", "ubuntu#",
-"myregistryhost:50AB0/ubuntu", "

[hadoop] branch trunk updated: YARN-11364. Docker Container to accept docker Image name with sha256 digest (#5092)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 83acb559817 YARN-11364. Docker Container to accept docker Image name 
with sha256 digest (#5092)
83acb559817 is described below

commit 83acb559817a97c14c4e3fd846dcc16ab615093e
Author: Ashutosh Gupta 
AuthorDate: Tue Nov 1 21:44:35 2022 +

YARN-11364. Docker Container to accept docker Image name with sha256 digest 
(#5092)

Co-authored-by: Ashutosh Gupta 
Reviewed-by: slfan1989 <55643692+slfan1...@users.noreply.github.com>
Signed-off-by: Chris Nauroth 
---
 .../linux/runtime/DockerLinuxContainerRuntime.java | 16 --
 .../linux/runtime/TestDockerContainerRuntime.java  | 34 +-
 2 files changed, 34 insertions(+), 16 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index c89ac520f4b..14f5ffeefe0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -208,6 +208,8 @@ public class DockerLinuxContainerRuntime extends 
OCIContainerRuntime {
   private static final Pattern dockerImagePattern =
   Pattern.compile(DOCKER_IMAGE_PATTERN);
 
+  private static final Pattern DOCKER_DIGEST_PATTERN = 
Pattern.compile("^sha256:[a-z0-9]{12,64}$");
+
   private static final String DEFAULT_PROCFS = "/proc";
 
   @InterfaceAudience.Private
@@ -1201,9 +1203,17 @@ public class DockerLinuxContainerRuntime extends 
OCIContainerRuntime {
   throw new ContainerExecutionException(
   ENV_DOCKER_CONTAINER_IMAGE + " not set!");
 }
-if (!dockerImagePattern.matcher(imageName).matches()) {
-  throw new ContainerExecutionException("Image name '" + imageName
-  + "' doesn't match docker image name pattern");
+// check if digest is part of imageName, extract and validate it.
+String digest = null;
+if (imageName.contains("@sha256")) {
+  String[] digestParts = imageName.split("@");
+  digest = digestParts[1];
+  imageName = digestParts[0];
+}
+if (!dockerImagePattern.matcher(imageName).matches() || (digest != null
+&& !DOCKER_DIGEST_PATTERN.matcher(digest).matches())) {
+  throw new ContainerExecutionException(
+  "Image name '" + imageName + "' doesn't match docker image name 
pattern");
 }
   }
 
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index f0ae037f9ff..ea7c2138093 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -2033,19 +2033,27 @@ public class TestDockerContainerRuntime {
 
   @Test
   public void testDockerImageNamePattern() throws Exception {
-String[] validNames =
-{ "ubuntu", "fedora/httpd:version1.0",
-"fedora/httpd:version1.0.test",
-"fedora/httpd:version1.0.TEST",
-"myregistryhost:5000/ubuntu",
-"myregistryhost:5000/fedora/httpd:version1.0",
-"myregistryhost:5000/fedora/httpd:version1.0.test",
-"myregistryhost:5000/fedora/httpd:version1.0.TEST"};
-
-String[] invalidNames = { "Ubuntu", "ubuntu || fedora", "ubuntu#",
-"myregistryhost:50AB0/ubuntu", "myregistry#host:50AB0/ubuntu",
-":8080/ubuntu"
-};
+Strin
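The runtime-side check added above strips an optional @sha256:<digest> suffix from the image name and validates the digest against its own pattern, so a pinned reference passes while a malformed digest is rejected. A self-contained sketch of the same idea; the simplified name pattern below is illustrative only, YARN's real DOCKER_IMAGE_PATTERN is stricter:

    import java.util.regex.Pattern;

    public final class ImageNameCheckSketch {
      // Simplified stand-in for YARN's DOCKER_IMAGE_PATTERN; illustrative only.
      private static final Pattern NAME =
          Pattern.compile("^[a-z0-9]([a-z0-9._/:-]*[a-z0-9])?$");
      // Same shape as the digest pattern added by this change.
      private static final Pattern DIGEST =
          Pattern.compile("^sha256:[a-z0-9]{12,64}$");

      static boolean isValid(String imageName) {
        String digest = null;
        if (imageName.contains("@sha256")) {
          String[] parts = imageName.split("@");
          digest = parts[1];
          imageName = parts[0];
        }
        return NAME.matcher(imageName).matches()
            && (digest == null || DIGEST.matcher(digest).matches());
      }

      public static void main(String[] args) {
        System.out.println(isValid("fedora/httpd:version1.0"));         // true
        System.out.println(isValid("ubuntu@sha256:0123456789abcdef"));  // true
        System.out.println(isValid("ubuntu@sha256:NOT-A-DIGEST"));      // false
      }
    }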

[hadoop] branch branch-3.3 updated: YARN-11363. Remove unused TimelineVersionWatcher and TimelineVersion from hadoop-yarn-server-tests (#5091)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new c5830d237b2 YARN-11363. Remove unused TimelineVersionWatcher and 
TimelineVersion from hadoop-yarn-server-tests (#5091)
c5830d237b2 is described below

commit c5830d237b2782adf591b94b89fea3f3d86154e8
Author: Ashutosh Gupta 
AuthorDate: Tue Nov 1 21:02:06 2022 +

YARN-11363. Remove unused TimelineVersionWatcher and TimelineVersion from 
hadoop-yarn-server-tests (#5091)

Reviewed-by: slfan1989 <55643692+slfan1...@users.noreply.github.com>
Signed-off-by: Chris Nauroth 
(cherry picked from commit 69225ae5b9b4153e1c2ba281fd0a09cc1514a962)
---
 .../yarn/server/timeline/TimelineVersion.java  | 31 --
 .../server/timeline/TimelineVersionWatcher.java| 47 --
 2 files changed, 78 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
deleted file mode 100644
index 57439de078f..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Retention(value = RetentionPolicy.RUNTIME)
-@Target(value = {ElementType.METHOD})
-public @interface TimelineVersion {
-  float value() default TimelineVersionWatcher.DEFAULT_TIMELINE_VERSION;
-}
-
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
deleted file mode 100644
index b00f13a0aba..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class TimelineVersionWatcher extends TestWatcher {
-  static final float DEFAULT_TIMELINE_VERSION = 1.0f;
-  private TimelineVersion version;
-
-  @Override
-  protected void starting(Description description) {
-version = description.getAnnotation(TimelineVersion.class);
-  }
-
-  /**
-   * @return the version number of timeline server for the current test (using
-   * timeline server v1.0 by defa

[hadoop] branch trunk updated: YARN-11363. Remove unused TimelineVersionWatcher and TimelineVersion from hadoop-yarn-server-tests (#5091)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 69225ae5b9b YARN-11363. Remove unused TimelineVersionWatcher and 
TimelineVersion from hadoop-yarn-server-tests (#5091)
69225ae5b9b is described below

commit 69225ae5b9b4153e1c2ba281fd0a09cc1514a962
Author: Ashutosh Gupta 
AuthorDate: Tue Nov 1 21:02:06 2022 +

YARN-11363. Remove unused TimelineVersionWatcher and TimelineVersion from 
hadoop-yarn-server-tests (#5091)

Reviewed-by: slfan1989 <55643692+slfan1...@users.noreply.github.com>
Signed-off-by: Chris Nauroth 
---
 .../yarn/server/timeline/TimelineVersion.java  | 31 -
 .../server/timeline/TimelineVersionWatcher.java| 32 --
 2 files changed, 63 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
deleted file mode 100644
index 57439de078f..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Retention(value = RetentionPolicy.RUNTIME)
-@Target(value = {ElementType.METHOD})
-public @interface TimelineVersion {
-  float value() default TimelineVersionWatcher.DEFAULT_TIMELINE_VERSION;
-}
-
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
deleted file mode 100644
index e06281ce33d..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import org.junit.jupiter.api.extension.TestWatcher;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class TimelineVersionWatcher implements TestWatcher {
-  static final float DEFAULT_TIMELINE_VERSION = 1.0f;
-  private TimelineVersion version;
-
-}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: document fix for MAPREDUCE-7425 (#5090)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new c0d8a63a45c document fix for MAPREDUCE-7425 (#5090)
c0d8a63a45c is described below

commit c0d8a63a45c621a9957a3af6092ad73e8708ffac
Author: wangteng13 <45892487+wangten...@users.noreply.github.com>
AuthorDate: Wed Nov 2 04:34:59 2022 +0800

document fix for MAPREDUCE-7425 (#5090)

Reviewed-by: Ashutosh Gupta 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 388f2f182f1f21ca0e3931db8b125f327f77880b)
(cherry picked from commit 4da1cad680d4783c3906a6a7e515b28b4acb9f37)
---
 .../src/main/resources/mapred-default.xml | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index bba382c60e1..2c245da4402 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1590,14 +1590,18 @@
   yarn.app.mapreduce.client-am.ipc.max-retries
   3
   The number of client retries to the AM - before reconnecting
-to the RM to fetch Application Status.
+to the RM to fetch Application Status.
+In other words, it is the ipc.client.connect.max.retries to be used during
+reconnecting to the RM and fetching Application Status.
 
 
 
   yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts
   3
   The number of client retries on socket timeouts to the AM - 
before
-reconnecting to the RM to fetch Application Status.
+reconnecting to the RM to fetch Application Status.
+In other words, it is the ipc.client.connect.max.retries.on.timeouts to be 
used during
+reconnecting to the RM and fetching Application Status.
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: document fix for MAPREDUCE-7425 (#5090)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 4da1cad680d document fix for MAPREDUCE-7425 (#5090)
4da1cad680d is described below

commit 4da1cad680d4783c3906a6a7e515b28b4acb9f37
Author: wangteng13 <45892487+wangten...@users.noreply.github.com>
AuthorDate: Wed Nov 2 04:34:59 2022 +0800

document fix for MAPREDUCE-7425 (#5090)

Reviewed-by: Ashutosh Gupta 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 388f2f182f1f21ca0e3931db8b125f327f77880b)
---
 .../src/main/resources/mapred-default.xml | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index ac7948f92a4..06e9c03861f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1660,14 +1660,18 @@
   yarn.app.mapreduce.client-am.ipc.max-retries
   3
   The number of client retries to the AM - before reconnecting
-to the RM to fetch Application Status.
+to the RM to fetch Application Status.
+In other words, it is the ipc.client.connect.max.retries to be used during
+reconnecting to the RM and fetching Application Status.
 
 
 
   yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts
   3
   The number of client retries on socket timeouts to the AM - 
before
-reconnecting to the RM to fetch Application Status.
+reconnecting to the RM to fetch Application Status.
+In other words, it is the ipc.client.connect.max.retries.on.timeouts to be 
used during
+reconnecting to the RM and fetching Application Status.
 
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (7ba304d1c63 -> 388f2f182f1)

2022-11-01 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 7ba304d1c63 HADOOP-18512: upgrade woodstox-core to 5.4.0 for security 
fix (#5087). Contributed by PJ Fanning.
 add 388f2f182f1 document fix for MAPREDUCE-7425 (#5090)

No new revisions were added by this update.

Summary of changes:
 .../src/main/resources/mapred-default.xml | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
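Both properties are read on the client side, so they can typically be tuned per job as well as in mapred-site.xml. A small sketch of setting them programmatically; the values are arbitrary examples, not recommended defaults:

    import org.apache.hadoop.conf.Configuration;

    public final class ClientAmRetrySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Retries against the AM before falling back to the RM for application status.
        conf.setInt("yarn.app.mapreduce.client-am.ipc.max-retries", 5);
        conf.setInt("yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts", 5);
        System.out.println(
            conf.getInt("yarn.app.mapreduce.client-am.ipc.max-retries", 3));
      }
    }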


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-11360: Add number of decommissioning/shutdown nodes to YARN cluster metrics. (#5060)

2022-10-28 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new cd18b784da1 YARN-11360: Add number of decommissioning/shutdown nodes 
to YARN cluster metrics. (#5060)
cd18b784da1 is described below

commit cd18b784da1e4dd9434696a7b599e823defea134
Author: Chris Nauroth 
AuthorDate: Fri Oct 28 11:07:01 2022 -0700

YARN-11360: Add number of decommissioning/shutdown nodes to YARN cluster 
metrics. (#5060)

(cherry picked from commit bfb84cd7f66fb6fe98809cc5d6c59864995855b1)
(cherry picked from commit 33293d4ba44c545ba305272a381c42ce758ffd96)
---
 .../yarn/api/records/YarnClusterMetrics.java   | 26 +
 .../src/main/proto/yarn_protos.proto   |  2 +
 .../org/apache/hadoop/yarn/client/cli/TopCLI.java  | 18 --
 .../apache/hadoop/yarn/client/cli/TestTopCLI.java  | 66 +-
 .../records/impl/pb/YarnClusterMetricsPBImpl.java  | 33 ++-
 .../server/resourcemanager/ClientRMService.java|  2 +
 .../resourcemanager/TestClientRMService.java   | 56 ++
 .../router/clientrm/RouterYarnClientUtils.java |  6 ++
 .../router/clientrm/TestRouterYarnClientUtils.java |  4 ++
 9 files changed, 206 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
index fc3edf7fb74..f460e60f483 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
@@ -53,6 +53,20 @@ public abstract class YarnClusterMetrics {
   @Unstable
   public abstract void setNumNodeManagers(int numNodeManagers);
 
+  /**
+   * Get the number of DecommissioningNodeManagers in the cluster.
+   *
+   * @return number of DecommissioningNodeManagers in the cluster
+   */
+  @Public
+  @Unstable
+  public abstract int getNumDecommissioningNodeManagers();
+
+  @Private
+  @Unstable
+  public abstract void setNumDecommissioningNodeManagers(
+  int numDecommissioningNodeManagers);
+
   /**
* Get the number of DecommissionedNodeManagers in the cluster.
* 
@@ -119,4 +133,16 @@ public abstract class YarnClusterMetrics {
   @Unstable
   public abstract void setNumRebootedNodeManagers(int numRebootedNodeManagers);
 
+  /**
+   * Get the number of ShutdownNodeManagers in the cluster.
+   *
+   * @return number of ShutdownNodeManagers in the cluster
+   */
+  @Public
+  @Unstable
+  public abstract int getNumShutdownNodeManagers();
+
+  @Private
+  @Unstable
+  public abstract void setNumShutdownNodeManagers(int numShutdownNodeManagers);
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 3314356c0b5..c0ef48579e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -561,6 +561,8 @@ message YarnClusterMetricsProto {
   optional int32 num_lost_nms = 4;
   optional int32 num_unhealthy_nms = 5;
   optional int32 num_rebooted_nms = 6;
+  optional int32 num_decommissioning_nms = 7;
+  optional int32 num_shutdown_nms = 8;
 }
 
 enum QueueStateProto {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
index 0d918065070..1b6890f7bb9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
@@ -339,9 +339,11 @@ public class TopCLI extends YarnCLI {
 int totalNodes;
 int runningNodes;
 int unhealthyNodes;
+int decommissioningNodes;
 int decommissionedNodes;
 int lostNodes;
 int rebootedNodes;
+int shutdownNodes;
   }
 
   private static class QueueMetrics {
@@ -696,6 +698,8 @@ public class TopCLI extends YarnCLI {
   return nodeInfo;
 }
 
+nodeInfo.decommissioningNodes =
+yarnClusterMetrics.getNumDecommissioningNodeManagers();
 nodeInfo.decommissionedNodes =
 yarnClusterMetrics.getNumDecommissionedNodeManagers();
 nodeInfo.totalNodes = yarnClusterMetrics.getNumNodeManagers();
@@ -703,6 +707,7 @@ public class TopCLI extends YarnCLI {
 nodeInfo.lostNodes

[hadoop] branch branch-3.3 updated: YARN-11360: Add number of decommissioning/shutdown nodes to YARN cluster metrics. (#5060)

2022-10-28 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 33293d4ba44 YARN-11360: Add number of decommissioning/shutdown nodes 
to YARN cluster metrics. (#5060)
33293d4ba44 is described below

commit 33293d4ba44c545ba305272a381c42ce758ffd96
Author: Chris Nauroth 
AuthorDate: Fri Oct 28 11:07:01 2022 -0700

YARN-11360: Add number of decommissioning/shutdown nodes to YARN cluster 
metrics. (#5060)

(cherry picked from commit bfb84cd7f66fb6fe98809cc5d6c59864995855b1)
---
 .../yarn/api/records/YarnClusterMetrics.java   | 26 +
 .../src/main/proto/yarn_protos.proto   |  2 +
 .../org/apache/hadoop/yarn/client/cli/TopCLI.java  | 16 --
 .../apache/hadoop/yarn/client/cli/TestTopCLI.java  | 66 +-
 .../records/impl/pb/YarnClusterMetricsPBImpl.java  | 33 ++-
 .../server/resourcemanager/ClientRMService.java|  2 +
 .../resourcemanager/TestClientRMService.java   | 56 ++
 .../router/clientrm/RouterYarnClientUtils.java |  6 ++
 .../router/clientrm/TestRouterYarnClientUtils.java |  4 ++
 9 files changed, 204 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
index fc3edf7fb74..f460e60f483 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java
@@ -53,6 +53,20 @@ public abstract class YarnClusterMetrics {
   @Unstable
   public abstract void setNumNodeManagers(int numNodeManagers);
 
+  /**
+   * Get the number of DecommissioningNodeManagers in the cluster.
+   *
+   * @return number of DecommissioningNodeManagers in the cluster
+   */
+  @Public
+  @Unstable
+  public abstract int getNumDecommissioningNodeManagers();
+
+  @Private
+  @Unstable
+  public abstract void setNumDecommissioningNodeManagers(
+  int numDecommissioningNodeManagers);
+
   /**
* Get the number of DecommissionedNodeManagers in the cluster.
* 
@@ -119,4 +133,16 @@ public abstract class YarnClusterMetrics {
   @Unstable
   public abstract void setNumRebootedNodeManagers(int numRebootedNodeManagers);
 
+  /**
+   * Get the number of ShutdownNodeManagers in the cluster.
+   *
+   * @return number of ShutdownNodeManagers in the cluster
+   */
+  @Public
+  @Unstable
+  public abstract int getNumShutdownNodeManagers();
+
+  @Private
+  @Unstable
+  public abstract void setNumShutdownNodeManagers(int numShutdownNodeManagers);
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index e70d471384e..cba5832f84a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -571,6 +571,8 @@ message YarnClusterMetricsProto {
   optional int32 num_lost_nms = 4;
   optional int32 num_unhealthy_nms = 5;
   optional int32 num_rebooted_nms = 6;
+  optional int32 num_decommissioning_nms = 7;
+  optional int32 num_shutdown_nms = 8;
 }
 
 enum QueueStateProto {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
index 79b1406ed18..59d82275e6a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
@@ -339,9 +339,11 @@ public class TopCLI extends YarnCLI {
 int totalNodes;
 int runningNodes;
 int unhealthyNodes;
+int decommissioningNodes;
 int decommissionedNodes;
 int lostNodes;
 int rebootedNodes;
+int shutdownNodes;
   }
 
   private static class QueueMetrics {
@@ -696,6 +698,8 @@ public class TopCLI extends YarnCLI {
   return nodeInfo;
 }
 
+nodeInfo.decommissioningNodes =
+yarnClusterMetrics.getNumDecommissioningNodeManagers();
 nodeInfo.decommissionedNodes =
 yarnClusterMetrics.getNumDecommissionedNodeManagers();
 nodeInfo.totalNodes = yarnClusterMetrics.getNumNodeManagers();
@@ -703,6 +707,7 @@ public class TopCLI extends YarnCLI {
 nodeInfo.lostNodes = yarnClusterMetrics.getNumLostNodeManagers();
 nodeInfo.unhealthyNodes

[hadoop] branch trunk updated (88f7f5bc015 -> bfb84cd7f66)

2022-10-28 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 88f7f5bc015 HDFS-16802.Print options when accessing 
ClientProtocol#rename2(). (#5013)
 add bfb84cd7f66 YARN-11360: Add number of decommissioning/shutdown nodes 
to YARN cluster metrics. (#5060)

No new revisions were added by this update.

Summary of changes:
 .../yarn/api/records/YarnClusterMetrics.java   | 26 +
 .../src/main/proto/yarn_protos.proto   |  2 +
 .../org/apache/hadoop/yarn/client/cli/TopCLI.java  | 16 --
 .../apache/hadoop/yarn/client/cli/TestTopCLI.java  | 66 +-
 .../records/impl/pb/YarnClusterMetricsPBImpl.java  | 33 ++-
 .../server/resourcemanager/ClientRMService.java|  2 +
 .../resourcemanager/TestClientRMService.java   | 56 ++
 .../router/clientrm/RouterYarnClientUtils.java |  7 ++-
 .../router/clientrm/TestRouterYarnClientUtils.java |  4 ++
 9 files changed, 204 insertions(+), 8 deletions(-)
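With these getters in place, the decommissioning and shutdown counts become visible to anything that already consumes YarnClusterMetrics, including the top CLI shown in the diff. A minimal sketch of reading them through YarnClient; it assumes a reachable ResourceManager configured via yarn-site.xml on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
    import org.apache.hadoop.yarn.client.api.YarnClient;

    public final class ClusterMetricsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        YarnClient client = YarnClient.createYarnClient();
        client.init(conf);
        client.start();
        try {
          YarnClusterMetrics metrics = client.getYarnClusterMetrics();
          System.out.println("total NMs:           " + metrics.getNumNodeManagers());
          System.out.println("decommissioning NMs: " + metrics.getNumDecommissioningNodeManagers());
          System.out.println("shutdown NMs:        " + metrics.getNumShutdownNodeManagers());
        } finally {
          client.stop();
        }
      }
    }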


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: MAPREDUCE-7370. Parallelize MultipleOutputs#close call (#4248). Contributed by Ashutosh Gupta.

2022-10-06 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 725cd907125 MAPREDUCE-7370. Parallelize MultipleOutputs#close call 
(#4248). Contributed by Ashutosh Gupta.
725cd907125 is described below

commit 725cd9071254d1099fa5eb9b03acb203568896f7
Author: Ashutosh Gupta 
AuthorDate: Thu Oct 6 23:23:05 2022 +0100

MAPREDUCE-7370. Parallelize MultipleOutputs#close call (#4248). Contributed 
by Ashutosh Gupta.

Reviewed-by: Akira Ajisaka 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 062c50db6bebfabae38aec9e17be2483a11c3f7f)
---
 .../apache/hadoop/mapred/lib/MultipleOutputs.java  | 73 --
 .../java/org/apache/hadoop/mapreduce/MRConfig.java |  2 +
 .../mapreduce/lib/output/MultipleOutputs.java  | 56 -
 .../hadoop/mapred/lib/TestMultipleOutputs.java | 20 ++
 .../lib/output/TestMRMultipleOutputs.java  | 23 ++-
 5 files changed, 165 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
index 3ef6601fbfe..a214420df80 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputs.java
@@ -17,15 +17,39 @@
  */
 package org.apache.hadoop.mapred.lib;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.StringTokenizer;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.mapred.*;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputCollector;
+import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.util.Progressable;
 
-import java.io.IOException;
-import java.util.*;
-
 /**
  * The MultipleOutputs class simplifies writing to additional outputs other
  * than the job default output via the OutputCollector passed to
@@ -132,6 +156,7 @@ public class MultipleOutputs {
* Counters group used by the counters of MultipleOutputs.
*/
   private static final String COUNTERS_GROUP = MultipleOutputs.class.getName();
+  private static final Logger LOG = 
LoggerFactory.getLogger(MultipleOutputs.class);
 
   /**
* Checks if a named output is alreadyDefined or not.
@@ -381,6 +406,11 @@ public class MultipleOutputs {
   private Map recordWriters;
   private boolean countersEnabled;
 
+  @VisibleForTesting
+  synchronized void setRecordWriters(Map recordWriters) {
+this.recordWriters = recordWriters;
+  }
+
   /**
* Creates and initializes multiple named outputs support, it should be
* instantiated in the Mapper/Reducer configure method.
@@ -528,8 +558,41 @@ public class MultipleOutputs {
* could not be closed properly.
*/
   public void close() throws IOException {
+int nThreads = conf.getInt(MRConfig.MULTIPLE_OUTPUTS_CLOSE_THREAD_COUNT,
+MRConfig.DEFAULT_MULTIPLE_OUTPUTS_CLOSE_THREAD_COUNT);
+AtomicBoolean encounteredException = new AtomicBoolean(false);
+ThreadFactory threadFactory = new 
ThreadFactoryBuilder().setNameFormat("MultipleOutputs-close")
+.setUncaughtExceptionHandler(((t, e) -> {
+  LOG.error("Thread " + t + " failed unexpectedly", e);
+  encounteredException.set(true);
+})).build();
+ExecutorService executorService = Executors.newFixedThreadPool(nThreads, 
threadFactory);
+
+List> callableList = new 
ArrayList<>(recordWriters.size());
+
 for (RecordWriter writer : recordWr
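The rewritten close() fans the per-writer close calls out to a pool built from the MultipleOutputs-close thread factory shown above instead of closing writers one by one. The general shape of that pattern, reduced to plain Closeables; this is illustrative only, the upstream code closes RecordWriters with a Reporter and takes the pool size from MRConfig.MULTIPLE_OUTPUTS_CLOSE_THREAD_COUNT:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    final class ParallelCloseSketch {
      // Close every writer on a fixed-size pool and surface the first failure.
      static void closeAll(List<? extends Closeable> writers, int nThreads) throws IOException {
        ExecutorService pool = Executors.newFixedThreadPool(nThreads);
        try {
          List<Future<Void>> pending = new ArrayList<>(writers.size());
          for (Closeable writer : writers) {
            pending.add(pool.submit((Callable<Void>) () -> {
              writer.close();
              return null;
            }));
          }
          for (Future<Void> f : pending) {
            f.get(); // rethrows any close() failure wrapped in an ExecutionException
          }
        } catch (Exception e) {
          throw new IOException("Failed to close one or more outputs", e);
        } finally {
          pool.shutdown();
        }
      }
    }

For jobs that write to many named outputs, closing in parallel shortens the tail of task teardown, while the uncaught-exception handler and the final wait keep a failed close from being silently dropped.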

[hadoop] branch trunk updated (8336b91329c -> 062c50db6be)

2022-10-06 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 8336b91329c HADOOP-18469. Add secure XML parser factories to XMLUtils 
(#4940)
 add 062c50db6be MAPREDUCE-7370. Parallelize MultipleOutputs#close call 
(#4248). Contributed by Ashutosh Gupta.

No new revisions were added by this update.

Summary of changes:
 .../apache/hadoop/mapred/lib/MultipleOutputs.java  | 73 --
 .../java/org/apache/hadoop/mapreduce/MRConfig.java |  2 +
 .../mapreduce/lib/output/MultipleOutputs.java  | 55 +++-
 .../hadoop/mapred/lib/TestMultipleOutputs.java | 20 ++
 .../lib/output/TestMRMultipleOutputs.java  | 23 ++-
 5 files changed, 165 insertions(+), 8 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: MAPREDUCE-7372 MapReduce set permission too late in copyJar method (#4026). Contributed by Zhang Dongsheng.

2022-07-25 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new ddb59cd3ccc MAPREDUCE-7372 MapReduce set permission too late in 
copyJar method (#4026). Contributed by Zhang Dongsheng.
ddb59cd3ccc is described below

commit ddb59cd3ccca98b641526f096d5503401bf8682d
Author: skysiders <64545691+skysid...@users.noreply.github.com>
AuthorDate: Tue Jul 26 02:38:59 2022 +0800

MAPREDUCE-7372 MapReduce set permission too late in copyJar method (#4026). 
Contributed by Zhang Dongsheng.

Reviewed-by: Steve Loughran 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 9fe96238d2cf9f32cd36888098bccc5a4cfe1723)
(cherry picked from commit 1d2a60f6230cce53dc7eb3be4c38335dfd493ca5)
---
 .../main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index c8686d7162e..0eaa4673113 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -784,9 +784,11 @@ class JobResourceUploader {
   void copyJar(Path originalJarPath, Path submitJarFile,
   short replication) throws IOException {
 jtFs.copyFromLocalFile(originalJarPath, submitJarFile);
-jtFs.setReplication(submitJarFile, replication);
+// The operation of setReplication requires certain permissions
+// so we need to make sure it has enough permissions
 jtFs.setPermission(submitJarFile, new FsPermission(
 JobSubmissionFiles.JOB_FILE_PERMISSION));
+jtFs.setReplication(submitJarFile, replication);
   }
 
   private void addLog4jToDistributedCache(Job job, Path jobSubmitDir)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: MAPREDUCE-7372 MapReduce set permission too late in copyJar method (#4026). Contributed by Zhang Dongsheng.

2022-07-25 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 1d2a60f6230 MAPREDUCE-7372 MapReduce set permission too late in 
copyJar method (#4026). Contributed by Zhang Dongsheng.
1d2a60f6230 is described below

commit 1d2a60f6230cce53dc7eb3be4c38335dfd493ca5
Author: skysiders <64545691+skysid...@users.noreply.github.com>
AuthorDate: Tue Jul 26 02:38:59 2022 +0800

MAPREDUCE-7372 MapReduce set permission too late in copyJar method (#4026). 
Contributed by Zhang Dongsheng.

Reviewed-by: Steve Loughran 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 9fe96238d2cf9f32cd36888098bccc5a4cfe1723)
---
 .../main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index d26354913a6..90b9c8a41ae 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -783,9 +783,11 @@ class JobResourceUploader {
   void copyJar(Path originalJarPath, Path submitJarFile,
   short replication) throws IOException {
 jtFs.copyFromLocalFile(originalJarPath, submitJarFile);
-jtFs.setReplication(submitJarFile, replication);
+// The operation of setReplication requires certain permissions
+// so we need to make sure it has enough permissions
 jtFs.setPermission(submitJarFile, new FsPermission(
 JobSubmissionFiles.JOB_FILE_PERMISSION));
+jtFs.setReplication(submitJarFile, replication);
   }
 
   private void addLog4jToDistributedCache(Job job, Path jobSubmitDir)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (6ba2c537206 -> 9fe96238d2c)

2022-07-25 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from 6ba2c537206 HDFS-16681. Do not pass GCC flags for MSVC in libhdfspp 
(#4615)
 add 9fe96238d2c MAPREDUCE-7372 MapReduce set permission too late in 
copyJar method (#4026). Contributed by Zhang Dongsheng.

No new revisions were added by this update.

Summary of changes:
 .../main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-18300. Upgrade Gson dependency to version 2.9.0 (#4454)

2022-06-22 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 399580d3023 HADOOP-18300. Upgrade Gson dependency to version 2.9.0 
(#4454)
399580d3023 is described below

commit 399580d30232c5bc808d7c71784f555bafcfc334
Author: Igor Dvorzhak 
AuthorDate: Wed Jun 22 16:37:22 2022 -0700

HADOOP-18300. Upgrade Gson dependency to version 2.9.0 (#4454)

Reviewed-by: Ayush Saxena 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 77d1b194c77ec62dc31f89e15594bc1250d88de3)
(cherry picked from commit d41e0a9cc30c9dbf4ba311b3ed18a6942a160287)
---
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 6 ++
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 7 +++
 hadoop-project/pom.xml  | 2 +-
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index dce3e0326c9..6675387f6a9 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -723,6 +723,12 @@
 testdata/*
   
 
+<filter>
+  <artifact>com.google.code.gson:gson</artifact>
+  <excludes>
+    <exclude>META-INF/versions/9/module-info.class</exclude>
+  </excludes>
+</filter>
 
 
   org.mockito:mockito-all
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 41d96655904..9318afafbbb 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -232,6 +232,13 @@
 update*
   
 
+<filter>
+  <artifact>com.google.code.gson:gson</artifact>
+  <excludes>
+    <exclude>META-INF/versions/9/module-info.class</exclude>
+  </excludes>
+</filter>
+
   
   
 
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 068b7728483..1e47ff6fe6d 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -129,7 +129,7 @@
 2.7.5
 1.1
 2.2.21
-<gson.version>2.8.9</gson.version>
+<gson.version>2.9.0</gson.version>
 3.1.0-incubating
 4.1.0-incubating
 3.2.4


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-18300. Upgrade Gson dependency to version 2.9.0 (#4454)

2022-06-22 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new d41e0a9cc30 HADOOP-18300. Upgrade Gson dependency to version 2.9.0 
(#4454)
d41e0a9cc30 is described below

commit d41e0a9cc30c9dbf4ba311b3ed18a6942a160287
Author: Igor Dvorzhak 
AuthorDate: Wed Jun 22 16:37:22 2022 -0700

HADOOP-18300. Upgrade Gson dependency to version 2.9.0 (#4454)

Reviewed-by: Ayush Saxena 
Signed-off-by: Chris Nauroth 
(cherry picked from commit 77d1b194c77ec62dc31f89e15594bc1250d88de3)
---
 LICENSE-binary  | 2 +-
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 6 ++
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 7 +++
 hadoop-project/pom.xml  | 2 +-
 4 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 03c42f4deca..7fb2ee180d7 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -232,7 +232,7 @@ com.google:guice-servlet:4.0
 com.google.android:annotations:jar:4.1.1.4
 com.google.api.grpc:proto-google-common-protos:1.12.0
 com.google.code.findbugs:jsr305:3.0.2
-com.google.code.gson:gson:2.2.4
+com.google.code.gson:gson:2.9.0
 com.google.errorprone:error_prone_annotations:2.3.3
 com.google.j2objc:j2objc-annotations:1.1
 com.google.json-simple:json-simple:1.1.1
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index e8e58e6b915..76a2ebf0365 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -747,6 +747,12 @@
 
META-INF/versions/9/module-info.class
   
 
+<filter>
+  <artifact>com.google.code.gson:gson</artifact>
+  <excludes>
+    <exclude>META-INF/versions/9/module-info.class</exclude>
+  </excludes>
+</filter>
 
 
 
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 99bb298f8ec..6f17134b94f 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -249,6 +249,13 @@
 
META-INF/versions/9/module-info.class
   
 
+<filter>
+  <artifact>com.google.code.gson:gson</artifact>
+  <excludes>
+    <exclude>META-INF/versions/9/module-info.class</exclude>
+  </excludes>
+</filter>
+
   
   
 
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index bcb7d97fdee..8dc1862ed57 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -141,7 +141,7 @@
 1.1
 5.2.0
 2.2.21
-<gson.version>2.8.9</gson.version>
+<gson.version>2.9.0</gson.version>
 3.2.4
 3.10.6.Final
 4.1.68.Final


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-18300. Upgrade Gson dependency to version 2.9.0 (#4454)

2022-06-22 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 77d1b194c77 HADOOP-18300. Upgrade Gson dependency to version 2.9.0 
(#4454)
77d1b194c77 is described below

commit 77d1b194c77ec62dc31f89e15594bc1250d88de3
Author: Igor Dvorzhak 
AuthorDate: Wed Jun 22 16:37:22 2022 -0700

HADOOP-18300. Upgrade Gson dependency to version 2.9.0 (#4454)

Reviewed-by: Ayush Saxena 
Signed-off-by: Chris Nauroth 
---
 LICENSE-binary  | 2 +-
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 6 ++
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 7 +++
 hadoop-project/pom.xml  | 2 +-
 4 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 980b9c7f2b6..49948526390 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -231,7 +231,7 @@ com.github.stephenc.jcip:jcip-annotations:1.0-1
 com.google:guice:4.0
 com.google:guice-servlet:4.0
 com.google.api.grpc:proto-google-common-protos:1.0.0
-com.google.code.gson:2.2.4
+com.google.code.gson:2.9.0
 com.google.errorprone:error_prone_annotations:2.2.0
 com.google.j2objc:j2objc-annotations:1.1
 com.google.json-simple:json-simple:1.1.1
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 06e36837a20..4c8900dc2af 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -757,6 +757,12 @@
 
META-INF/versions/11/module-info.class
   
 
+<filter>
+  <artifact>com.google.code.gson:gson</artifact>
+  <excludes>
+    <exclude>META-INF/versions/9/module-info.class</exclude>
+  </excludes>
+</filter>
 
 
 
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 35fbd7665fb..98756c24395 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -249,6 +249,13 @@
 
META-INF/versions/11/module-info.class
   
 
+<filter>
+  <artifact>com.google.code.gson:gson</artifact>
+  <excludes>
+    <exclude>META-INF/versions/9/module-info.class</exclude>
+  </excludes>
+</filter>
+
   
   
 
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 2bbb57bdf43..e8cb47efe4b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -141,7 +141,7 @@
 2.0.6.1
 5.2.0
 2.2.21
-<gson.version>2.8.9</gson.version>
+<gson.version>2.9.0</gson.version>
 3.2.4
 3.10.6.Final
 4.1.68.Final
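
Besides the version bump, the shade-plugin filters added here keep Gson's META-INF/versions/9/module-info.class out of the shaded client jars, presumably so the relocated uber-jars do not carry Gson's JPMS module descriptor. A small, illustrative verification sketch (not part of the patch; the class name is invented) that scans a built jar for such entries:

import java.io.IOException;
import java.util.Enumeration;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

public final class ShadedJarModuleInfoCheck {

  /**
   * Scan a jar (for example the shaded hadoop-client-runtime jar) and print
   * any multi-release module-info.class entries, which is a quick way to
   * confirm that the exclusions added in this commit took effect.
   */
  public static void main(String[] args) throws IOException {
    try (JarFile jar = new JarFile(args[0])) {
      Enumeration<JarEntry> entries = jar.entries();
      while (entries.hasMoreElements()) {
        String name = entries.nextElement().getName();
        if (name.startsWith("META-INF/versions/")
            && name.endsWith("module-info.class")) {
          System.out.println("leaked descriptor: " + name);
        }
      }
    }
  }

  private ShadedJarModuleInfoCheck() {
  }
}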


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-16623. Avoid IllegalArgumentException in LifelineSender (#4409)

2022-06-10 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new aba44189816 HDFS-16623. Avoid IllegalArgumentException in 
LifelineSender (#4409)
aba44189816 is described below

commit aba44189816fea529f56da2e214a3d35a7aaf5e2
Author: xuzq <15040255...@163.com>
AuthorDate: Sat Jun 11 03:00:56 2022 +0800

HDFS-16623. Avoid IllegalArgumentException in LifelineSender (#4409)

* HDFS-16623. Avoid IllegalArgumentException in LifelineSender

Co-authored-by: zengqiang.xu 
(cherry picked from commit af5003a47311bad542964c42c1f776e4350446b9)
(cherry picked from commit ee3ee98ee5a4c34da7a4960b099686bdc1186a71)
---
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java  |  3 ++-
 .../hdfs/server/datanode/TestBpServiceActorScheduler.java   | 13 +
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index ea914024699..0b3eb14dff8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -1241,7 +1241,8 @@ class BPServiceActor implements Runnable {
 }
 
 long getLifelineWaitTime() {
-  return nextLifelineTime - monotonicNow();
+  long waitTime = nextLifelineTime - monotonicNow();
+  return waitTime > 0 ? waitTime : 0;
 }
 
 /**
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
index 438be89be92..0bd450bed7e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.datanode.BPServiceActor.Scheduler;
@@ -182,6 +183,18 @@ public class TestBpServiceActorScheduler {
 }
   }
 
+  @Test
+  public void testScheduleLifelineScheduleTime() {
+Scheduler mockScheduler = spy(new Scheduler(
+HEARTBEAT_INTERVAL_MS, LIFELINE_INTERVAL_MS,
+BLOCK_REPORT_INTERVAL_MS, OUTLIER_REPORT_INTERVAL_MS));
+long now = Time.monotonicNow();
+mockScheduler.scheduleNextLifeline(now);
+long mockMonotonicNow = now + LIFELINE_INTERVAL_MS * 2;
+doReturn(mockMonotonicNow).when(mockScheduler).monotonicNow();
+assertTrue(mockScheduler.getLifelineWaitTime() >= 0);
+  }
+
   @Test
   public void testOutlierReportScheduling() {
 for (final long now : getTimestamps()) {
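
The one-line fix above clamps the lifeline wait at zero. The reasoning, sketched as standalone code (the class and method names are invented; only the floor-at-zero logic mirrors the commit): when the sender falls behind schedule, nextLifelineTime minus now goes negative, and handing a negative timeout to Object.wait() or Thread.sleep() throws IllegalArgumentException, whereas zero simply means the next lifeline is sent immediately.

public final class LifelineWaitSketch {

  /**
   * Compute how long to wait before sending the next lifeline. If the
   * sender is already overdue, the raw difference is negative, so it is
   * floored at zero to mean "send now" rather than risk an
   * IllegalArgumentException from the subsequent wait/sleep call.
   */
  static long lifelineWaitTime(long nextLifelineTimeMs, long nowMs) {
    long waitTime = nextLifelineTimeMs - nowMs;
    return waitTime > 0 ? waitTime : 0;
  }

  public static void main(String[] args) throws InterruptedException {
    long scheduled = System.currentTimeMillis() - 10_000L; // 10s overdue
    long wait = lifelineWaitTime(scheduled, System.currentTimeMillis());
    Thread.sleep(wait); // safe: never negative, so never throws
    System.out.println("waited " + wait + " ms");
  }

  private LifelineWaitSketch() {
  }
}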


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HDFS-16623. Avoid IllegalArgumentException in LifelineSender (#4409)

2022-06-10 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new ee3ee98ee5a HDFS-16623. Avoid IllegalArgumentException in 
LifelineSender (#4409)
ee3ee98ee5a is described below

commit ee3ee98ee5a4c34da7a4960b099686bdc1186a71
Author: xuzq <15040255...@163.com>
AuthorDate: Sat Jun 11 03:00:56 2022 +0800

HDFS-16623. Avoid IllegalArgumentException in LifelineSender (#4409)

* HDFS-16623. Avoid IllegalArgumentException in LifelineSender

Co-authored-by: zengqiang.xu 
(cherry picked from commit af5003a47311bad542964c42c1f776e4350446b9)
---
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java  |  3 ++-
 .../hdfs/server/datanode/TestBpServiceActorScheduler.java   | 13 +
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 8a33e0baaae..d5e3cfd65c2 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -1286,7 +1286,8 @@ class BPServiceActor implements Runnable {
 }
 
 long getLifelineWaitTime() {
-  return nextLifelineTime - monotonicNow();
+  long waitTime = nextLifelineTime - monotonicNow();
+  return waitTime > 0 ? waitTime : 0;
 }
 
 @VisibleForTesting
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
index f8406ed7a3f..166ae118f78 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.datanode.BPServiceActor.Scheduler;
@@ -204,6 +205,18 @@ public class TestBpServiceActorScheduler {
 }
   }
 
+  @Test
+  public void testScheduleLifelineScheduleTime() {
+Scheduler mockScheduler = spy(new Scheduler(
+HEARTBEAT_INTERVAL_MS, LIFELINE_INTERVAL_MS,
+BLOCK_REPORT_INTERVAL_MS, OUTLIER_REPORT_INTERVAL_MS));
+long now = Time.monotonicNow();
+mockScheduler.scheduleNextLifeline(now);
+long mockMonotonicNow = now + LIFELINE_INTERVAL_MS * 2;
+doReturn(mockMonotonicNow).when(mockScheduler).monotonicNow();
+assertTrue(mockScheduler.getLifelineWaitTime() >= 0);
+  }
+
   @Test
   public void testOutlierReportScheduling() {
 for (final long now : getTimestamps()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-16623. Avoid IllegalArgumentException in LifelineSender (#4409)

2022-06-10 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new af5003a4731 HDFS-16623. Avoid IllegalArgumentException in 
LifelineSender (#4409)
af5003a4731 is described below

commit af5003a47311bad542964c42c1f776e4350446b9
Author: xuzq <15040255...@163.com>
AuthorDate: Sat Jun 11 03:00:56 2022 +0800

HDFS-16623. Avoid IllegalArgumentException in LifelineSender (#4409)

* HDFS-16623. Avoid IllegalArgumentException in LifelineSender

Co-authored-by: zengqiang.xu 
---
 .../apache/hadoop/hdfs/server/datanode/BPServiceActor.java  |  3 ++-
 .../hdfs/server/datanode/TestBpServiceActorScheduler.java   | 13 +
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 838259d7f6a..d1b7024bc96 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -1345,7 +1345,8 @@ class BPServiceActor implements Runnable {
 }
 
 long getLifelineWaitTime() {
-  return nextLifelineTime - monotonicNow();
+  long waitTime = nextLifelineTime - monotonicNow();
+  return waitTime > 0 ? waitTime : 0;
 }
 
 @VisibleForTesting
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
index f8406ed7a3f..166ae118f78 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.datanode.BPServiceActor.Scheduler;
@@ -204,6 +205,18 @@ public class TestBpServiceActorScheduler {
 }
   }
 
+  @Test
+  public void testScheduleLifelineScheduleTime() {
+Scheduler mockScheduler = spy(new Scheduler(
+HEARTBEAT_INTERVAL_MS, LIFELINE_INTERVAL_MS,
+BLOCK_REPORT_INTERVAL_MS, OUTLIER_REPORT_INTERVAL_MS));
+long now = Time.monotonicNow();
+mockScheduler.scheduleNextLifeline(now);
+long mockMonotonicNow = now + LIFELINE_INTERVAL_MS * 2;
+doReturn(mockMonotonicNow).when(mockScheduler).monotonicNow();
+assertTrue(mockScheduler.getLifelineWaitTime() >= 0);
+  }
+
   @Test
   public void testOutlierReportScheduling() {
 for (final long now : getTimestamps()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)

2021-12-30 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new e88fe6a  HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)
e88fe6a is described below

commit e88fe6a2ad788bf9b7ae6e647cc449a9b3995c79
Author: Igor Dvorzhak 
AuthorDate: Thu Dec 30 13:27:54 2021 -0800

HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)

Change-Id: Ifd3fb9ec6ebfc8874bb799bc198219511fe55a2f

Update pom.xml

Update pom.xml

(cherry picked from commit 795054882af72a9d5d4f4e96f775e96473d75c23)
(cherry picked from commit 5d72fdfcb2d8bc1415b3a603075048a231da18ca)
Change-Id: I7bcea58613340bf97a5038da40a3643971f99364
---
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 636b9b1..f144ff5 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -129,7 +129,7 @@
 2.7.5
 1.1
 2.2.21
-<gson.version>2.2.4</gson.version>
+<gson.version>2.8.9</gson.version>
 3.1.0-incubating
 4.1.0-incubating
 3.2.4

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)

2021-12-30 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 5d72fdf  HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)
5d72fdf is described below

commit 5d72fdfcb2d8bc1415b3a603075048a231da18ca
Author: Igor Dvorzhak 
AuthorDate: Thu Dec 30 13:27:54 2021 -0800

HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)

Change-Id: Ifd3fb9ec6ebfc8874bb799bc198219511fe55a2f

Update pom.xml

Update pom.xml

(cherry picked from commit 795054882af72a9d5d4f4e96f775e96473d75c23)
---
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 69afddd..aa8d441 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -139,7 +139,7 @@
 1.1
 5.2.0
 2.2.21
-<gson.version>2.2.4</gson.version>
+<gson.version>2.8.9</gson.version>
 3.2.4
 3.10.6.Final
 4.1.68.Final

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)

2021-12-30 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7950548  HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)
7950548 is described below

commit 795054882af72a9d5d4f4e96f775e96473d75c23
Author: Igor Dvorzhak 
AuthorDate: Thu Dec 30 13:27:54 2021 -0800

HADOOP-13464. Upgrade Gson dependency to version 2.8.9 (#2524)

Change-Id: Ifd3fb9ec6ebfc8874bb799bc198219511fe55a2f

Update pom.xml

Update pom.xml
---
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c7f86ab..6b8dd0a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -138,7 +138,7 @@
 1.1
 5.2.0
 2.2.21
-<gson.version>2.2.4</gson.version>
+<gson.version>2.8.9</gson.version>
 3.2.4
 3.10.6.Final
 4.1.68.Final

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2.10 updated: HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)

2021-09-09 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 7f32a31  HDFS-16207. Remove NN logs stack trace for non-existent xattr 
query (#3375)
7f32a31 is described below

commit 7f32a31abfb654cea7098e7edf0bddeb93981f15
Author: Ahmed Hussein <50450311+amahuss...@users.noreply.github.com>
AuthorDate: Wed Sep 8 23:21:16 2021 -0500

HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)

Change-Id: Ibde523b20a6b8ac92991da52583e625a018d2ee6
---
 .../hdfs/protocol/XAttrNotFoundException.java  | 40 ++
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  7 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java|  4 ++-
 .../tools/offlineImageViewer/FSImageLoader.java|  4 +--
 .../java/org/apache/hadoop/hdfs/TestDFSShell.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java  |  5 +--
 6 files changed, 53 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
new file mode 100644
index 000..d958491
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The exception that happens when you ask to get a non existing XAttr.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class XAttrNotFoundException extends IOException {
+  private static final long serialVersionUID = -6506239904158794057L;
+  public static final String DEFAULT_EXCEPTION_MSG =
+  "At least one of the attributes provided was not found.";
+  public XAttrNotFoundException() {
+this(DEFAULT_EXCEPTION_MSG);
+  }
+  public XAttrNotFoundException(String msg) {
+super(msg);
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ce2e93c..3a417e7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.XAttrNotFoundException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -109,8 +110,7 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  throw new IOException(
-  "At least one of the attributes provided was not found.");
+  throw new XAttrNotFoundException();
 }
 List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {
@@ -124,8 +124,7 @@ class FSDirXAttrOp {
 }
   }
   if (!foundIt) {
-throw new IOException(
-"At least one of the attributes provided was not found.");
+throw new XAttrNotFoundException();
   }
 }
 return toGet;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 620a017..a14ff2

[hadoop] branch branch-3.2 updated: HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)

2021-09-08 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 1944e0d  HDFS-16207. Remove NN logs stack trace for non-existent xattr 
query (#3375)
1944e0d is described below

commit 1944e0d714dfa2d9aaad003aecc7fafdb352ed49
Author: Ahmed Hussein <50450311+amahuss...@users.noreply.github.com>
AuthorDate: Wed Sep 8 23:21:16 2021 -0500

HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)

Change-Id: Ibde523b20a6b8ac92991da52583e625a018d2ee6
---
 .../hdfs/protocol/XAttrNotFoundException.java  | 40 ++
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  7 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java|  4 ++-
 .../tools/offlineImageViewer/FSImageLoader.java|  4 +--
 .../java/org/apache/hadoop/hdfs/TestDFSShell.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java  |  5 +--
 6 files changed, 53 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
new file mode 100644
index 000..d958491
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The exception that happens when you ask to get a non existing XAttr.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class XAttrNotFoundException extends IOException {
+  private static final long serialVersionUID = -6506239904158794057L;
+  public static final String DEFAULT_EXCEPTION_MSG =
+  "At least one of the attributes provided was not found.";
+  public XAttrNotFoundException() {
+this(DEFAULT_EXCEPTION_MSG);
+  }
+  public XAttrNotFoundException(String msg) {
+super(msg);
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ff82610..88abec0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.XAttrNotFoundException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -114,8 +115,7 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  throw new IOException(
-  "At least one of the attributes provided was not found.");
+  throw new XAttrNotFoundException();
 }
 List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {
@@ -129,8 +129,7 @@ class FSDirXAttrOp {
 }
   }
   if (!foundIt) {
-throw new IOException(
-"At least one of the attributes provided was not found.");
+throw new XAttrNotFoundException();
   }
 }
 return toGet;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d

[hadoop] branch branch-3.3 updated: HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)

2021-09-08 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 1f61944  HDFS-16207. Remove NN logs stack trace for non-existent xattr 
query (#3375)
1f61944 is described below

commit 1f61944e3be1f8efe273d44e77ed3f4126b08b95
Author: Ahmed Hussein <50450311+amahuss...@users.noreply.github.com>
AuthorDate: Wed Sep 8 23:21:16 2021 -0500

HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)

Change-Id: Ibde523b20a6b8ac92991da52583e625a018d2ee6
---
 .../hdfs/protocol/XAttrNotFoundException.java  | 40 ++
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  7 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java|  4 ++-
 .../tools/offlineImageViewer/FSImageLoader.java|  4 +--
 .../java/org/apache/hadoop/hdfs/TestDFSShell.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java  |  5 +--
 6 files changed, 53 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
new file mode 100644
index 000..d958491
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The exception that happens when you ask to get a non existing XAttr.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class XAttrNotFoundException extends IOException {
+  private static final long serialVersionUID = -6506239904158794057L;
+  public static final String DEFAULT_EXCEPTION_MSG =
+  "At least one of the attributes provided was not found.";
+  public XAttrNotFoundException() {
+this(DEFAULT_EXCEPTION_MSG);
+  }
+  public XAttrNotFoundException(String msg) {
+super(msg);
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index ce78b5b..7f16910 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.XAttrNotFoundException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -114,8 +115,7 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  throw new IOException(
-  "At least one of the attributes provided was not found.");
+  throw new XAttrNotFoundException();
 }
 List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {
@@ -129,8 +129,7 @@ class FSDirXAttrOp {
 }
   }
   if (!foundIt) {
-throw new IOException(
-"At least one of the attributes provided was not found.");
+throw new XAttrNotFoundException();
   }
 }
 return toGet;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 3a

[hadoop] branch trunk updated: HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)

2021-09-08 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bddc9bf  HDFS-16207. Remove NN logs stack trace for non-existent xattr 
query (#3375)
bddc9bf is described below

commit bddc9bf63c3adb3d7445547bd1f8272e53b40bf7
Author: Ahmed Hussein <50450311+amahuss...@users.noreply.github.com>
AuthorDate: Wed Sep 8 23:21:16 2021 -0500

HDFS-16207. Remove NN logs stack trace for non-existent xattr query (#3375)
---
 .../hdfs/protocol/XAttrNotFoundException.java  | 40 ++
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  7 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java|  4 ++-
 .../tools/offlineImageViewer/FSImageLoader.java|  4 +--
 .../java/org/apache/hadoop/hdfs/TestDFSShell.java  |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java  |  5 +--
 6 files changed, 53 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
new file mode 100644
index 000..d958491
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/XAttrNotFoundException.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The exception that happens when you ask to get a non existing XAttr.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class XAttrNotFoundException extends IOException {
+  private static final long serialVersionUID = -6506239904158794057L;
+  public static final String DEFAULT_EXCEPTION_MSG =
+  "At least one of the attributes provided was not found.";
+  public XAttrNotFoundException() {
+this(DEFAULT_EXCEPTION_MSG);
+  }
+  public XAttrNotFoundException(String msg) {
+super(msg);
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 96dfdf9..632cff9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.XAttrNotFoundException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -116,8 +117,7 @@ public class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  throw new IOException(
-  "At least one of the attributes provided was not found.");
+  throw new XAttrNotFoundException();
 }
 List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {
@@ -131,8 +131,7 @@ public class FSDirXAttrOp {
 }
   }
   if (!foundIt) {
-throw new IOException(
-"At least one of the attributes provided was not found.");
+throw new XAttrNotFoundException();
   }
 }
 return toGet;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 580a991..e29fa67 100644
--- 
a/hadoop-hdfs-project/hadoop-h
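
The HDFS-16207 messages above replace a bare IOException, which callers could only recognize by matching its message text, with the dedicated XAttrNotFoundException subclass; the truncated NameNodeRpcServer hunk presumably registers the new type as an expected condition so the NameNode stops logging a full stack trace for a routine missing-attribute query. A self-contained sketch of the general pattern (the names below are invented for illustration and are not Hadoop APIs; only the default message mirrors the commit):

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

public final class TerseExceptionSketch {

  /** An IOException subclass for an expected, non-fatal condition,
   *  shaped like the XAttrNotFoundException added by this change. */
  static class AttrNotFoundException extends IOException {
    private static final long serialVersionUID = 1L;
    static final String DEFAULT_MSG =
        "At least one of the attributes provided was not found.";
    AttrNotFoundException() {
      super(DEFAULT_MSG);
    }
  }

  /** Throw the dedicated type instead of a bare IOException so callers
   *  and logging layers can match on the class, not on message text. */
  static byte[] lookup(Map<String, byte[]> attrs, String name)
      throws AttrNotFoundException {
    byte[] value = attrs.get(name);
    if (value == null) {
      throw new AttrNotFoundException();
    }
    return value;
  }

  public static void main(String[] args) {
    try {
      lookup(Collections.emptyMap(), "user.owner");
    } catch (AttrNotFoundException e) {
      // Expected condition: log the message only, skip the stack trace.
      System.out.println("terse: " + e.getMessage());
    }
  }

  private TerseExceptionSketch() {
  }
}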

[hadoop] branch branch-3.2 updated: HADOOP-15129. Datanode caches namenode DNS lookup failure and cannot startup (#3348) Co-authored-by: Karthik Palaniappan

2021-09-03 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 736367e  HADOOP-15129. Datanode caches namenode DNS lookup failure and 
cannot startup (#3348) Co-authored-by:  Karthik Palaniappan
736367e is described below

commit 736367e578a5092dc6ccf94547dfb2b3d2b90528
Author: Chris Nauroth 
AuthorDate: Fri Sep 3 18:41:56 2021 +

HADOOP-15129. Datanode caches namenode DNS lookup failure and cannot 
startup (#3348)
Co-authored-by:  Karthik Palaniappan

Change-Id: Id079a5319e5e83939d5dcce5fb9ebe3715ee864f
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 19 
 .../test/java/org/apache/hadoop/ipc/TestIPC.java   | 52 ++
 2 files changed, 62 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 45baa06..b9ce20a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -655,6 +655,16 @@ public class Client implements AutoCloseable {
   short timeoutFailures = 0;
   while (true) {
 try {
+  if (server.isUnresolved()) {
+// Jump into the catch block. updateAddress() will re-resolve
+// the address if this is just a temporary DNS failure. If not,
+// it will timeout after max ipc client retries
+throw NetUtils.wrapException(server.getHostName(),
+server.getPort(),
+NetUtils.getHostname(),
+0,
+new UnknownHostException());
+  }
   this.socket = socketFactory.createSocket();
   this.socket.setTcpNoDelay(tcpNoDelay);
   this.socket.setKeepAlive(true);
@@ -1604,15 +1614,6 @@ public class Client implements AutoCloseable {
   private Connection getConnection(ConnectionId remoteId,
   Call call, int serviceClass, AtomicBoolean fallbackToSimpleAuth)
   throws IOException {
-final InetSocketAddress address = remoteId.getAddress();
-if (address.isUnresolved()) {
-  throw NetUtils.wrapException(address.getHostName(),
-  address.getPort(),
-  null,
-  0,
-  new UnknownHostException());
-}
-
 final Consumer<Connection> removeMethod = c -> {
   final boolean removed = connections.remove(remoteId, c);
   if (removed && connections.isEmpty()) {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 07132da..c75391c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -46,6 +46,7 @@ import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.SocketException;
 import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -53,6 +54,7 @@ import java.util.List;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.ExecutorService;
@@ -87,6 +89,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
@@ -786,6 +789,55 @@ public class TestIPC {
 }
   }
 
+  @Test(timeout=60000)
+  public void testIpcHostResolutionTimeout() throws Exception {
+final InetSocketAddress addr = new InetSocketAddress("host.invalid", 80);
+
+// start client
+Client.setConnectTimeout(conf, 100);
+final Client client = new Client(LongWritable.class, conf);
+// set the rpc timeout to twice the MIN_SLEEP_TIME
+try {
+  LambdaTestUtils.intercept(UnknownHostException.class,
+  new Callable<Void>() {
+@Override
+public Void call() throws IOException {
+  TestIPC.this.call(client, new LongWritable(RANDOM.nextLong()),
+  addr, MIN_SLEEP_TIME * 2, conf);
+  return null;
+}
+  });
+} finally {
+  client.stop();
+

[hadoop] branch branch-3.3 updated: HADOOP-15129. Datanode caches namenode DNS lookup failure and cannot startup (#3348) Co-authored-by: Karthik Palaniappan

2021-09-03 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new cc90b4f  HADOOP-15129. Datanode caches namenode DNS lookup failure and 
cannot startup (#3348) Co-authored-by:  Karthik Palaniappan
cc90b4f is described below

commit cc90b4f987cde9e2fb094782582f3e1ba989fa62
Author: Chris Nauroth 
AuthorDate: Fri Sep 3 18:41:56 2021 +

HADOOP-15129. Datanode caches namenode DNS lookup failure and cannot 
startup (#3348)
Co-authored-by:  Karthik Palaniappan

Change-Id: Id079a5319e5e83939d5dcce5fb9ebe3715ee864f
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 19 
 .../test/java/org/apache/hadoop/ipc/TestIPC.java   | 52 ++
 2 files changed, 62 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 5585744..3aa7b03 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -655,6 +655,16 @@ public class Client implements AutoCloseable {
   short timeoutFailures = 0;
   while (true) {
 try {
+  if (server.isUnresolved()) {
+// Jump into the catch block. updateAddress() will re-resolve
+// the address if this is just a temporary DNS failure. If not,
+// it will timeout after max ipc client retries
+throw NetUtils.wrapException(server.getHostName(),
+server.getPort(),
+NetUtils.getHostname(),
+0,
+new UnknownHostException());
+  }
   this.socket = socketFactory.createSocket();
   this.socket.setTcpNoDelay(tcpNoDelay);
   this.socket.setKeepAlive(true);
@@ -1604,15 +1614,6 @@ public class Client implements AutoCloseable {
   private Connection getConnection(ConnectionId remoteId,
   Call call, int serviceClass, AtomicBoolean fallbackToSimpleAuth)
   throws IOException {
-final InetSocketAddress address = remoteId.getAddress();
-if (address.isUnresolved()) {
-  throw NetUtils.wrapException(address.getHostName(),
-  address.getPort(),
-  null,
-  0,
-  new UnknownHostException());
-}
-
 final Consumer<Connection> removeMethod = c -> {
   final boolean removed = connections.remove(remoteId, c);
   if (removed && connections.isEmpty()) {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 3288152..1b79784 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -47,6 +47,7 @@ import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.SocketException;
 import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -54,6 +55,7 @@ import java.util.List;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.ExecutorService;
@@ -88,6 +90,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
@@ -789,6 +792,55 @@ public class TestIPC {
 }
   }
 
+  @Test(timeout=60000)
+  public void testIpcHostResolutionTimeout() throws Exception {
+final InetSocketAddress addr = new InetSocketAddress("host.invalid", 80);
+
+// start client
+Client.setConnectTimeout(conf, 100);
+final Client client = new Client(LongWritable.class, conf);
+// set the rpc timeout to twice the MIN_SLEEP_TIME
+try {
+  LambdaTestUtils.intercept(UnknownHostException.class,
+  new Callable<Void>() {
+@Override
+public Void call() throws IOException {
+  TestIPC.this.call(client, new LongWritable(RANDOM.nextLong()),
+  addr, MIN_SLEEP_TIME * 2, conf);
+  return null;
+}
+  });
+} finally {
+  client.stop();
+

[hadoop] branch trunk updated: HADOOP-15129. Datanode caches namenode DNS lookup failure and cannot startup (#3348) Co-authored-by: Karthik Palaniappan

2021-09-03 Thread cnauroth
This is an automated email from the ASF dual-hosted git repository.

cnauroth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1d808f5  HADOOP-15129. Datanode caches namenode DNS lookup failure and 
cannot startup (#3348) Co-authored-by:  Karthik Palaniappan
1d808f5 is described below

commit 1d808f59d79194f0491938c4421dc518fd3e56b8
Author: Chris Nauroth 
AuthorDate: Fri Sep 3 18:41:56 2021 +

HADOOP-15129. Datanode caches namenode DNS lookup failure and cannot 
startup (#3348)
Co-authored-by:  Karthik Palaniappan

Change-Id: Id079a5319e5e83939d5dcce5fb9ebe3715ee864f
---
 .../main/java/org/apache/hadoop/ipc/Client.java| 19 
 .../test/java/org/apache/hadoop/ipc/TestIPC.java   | 52 ++
 2 files changed, 62 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 3c737ba..712db04 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -655,6 +655,16 @@ public class Client implements AutoCloseable {
   short timeoutFailures = 0;
   while (true) {
 try {
+  if (server.isUnresolved()) {
+// Jump into the catch block. updateAddress() will re-resolve
+// the address if this is just a temporary DNS failure. If not,
+// it will timeout after max ipc client retries
+throw NetUtils.wrapException(server.getHostName(),
+server.getPort(),
+NetUtils.getHostname(),
+0,
+new UnknownHostException());
+  }
   this.socket = socketFactory.createSocket();
   this.socket.setTcpNoDelay(tcpNoDelay);
   this.socket.setKeepAlive(true);
@@ -1604,15 +1614,6 @@ public class Client implements AutoCloseable {
   private Connection getConnection(ConnectionId remoteId,
   Call call, int serviceClass, AtomicBoolean fallbackToSimpleAuth)
   throws IOException {
-final InetSocketAddress address = remoteId.getAddress();
-if (address.isUnresolved()) {
-  throw NetUtils.wrapException(address.getHostName(),
-  address.getPort(),
-  null,
-  0,
-  new UnknownHostException());
-}
-
 final Consumer<Connection> removeMethod = c -> {
   final boolean removed = connections.remove(remoteId, c);
   if (removed && connections.isEmpty()) {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index d486c7e..99047ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -47,6 +47,7 @@ import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.SocketException;
 import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -54,6 +55,7 @@ import java.util.List;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.ExecutorService;
@@ -88,6 +90,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
@@ -789,6 +792,55 @@ public class TestIPC {
 }
   }
 
+  @Test(timeout=60000)
+  public void testIpcHostResolutionTimeout() throws Exception {
+final InetSocketAddress addr = new InetSocketAddress("host.invalid", 80);
+
+// start client
+Client.setConnectTimeout(conf, 100);
+final Client client = new Client(LongWritable.class, conf);
+// set the rpc timeout to twice the MIN_SLEEP_TIME
+try {
+  LambdaTestUtils.intercept(UnknownHostException.class,
+  new Callable<Void>() {
+@Override
+public Void call() throws IOException {
+  TestIPC.this.call(client, new LongWritable(RANDOM.nextLong()),
+  addr, MIN_SLEEP_TIME * 2, conf);
+  return null;
+}
+  });
+} finally {
+  client.stop();
+}
+ 
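
In the Client.java hunk of these HADOOP-15129 messages, the unresolved-address check moves inside the connection retry loop so that, as the in-diff comment notes, updateAddress() can re-resolve a name that failed only temporarily instead of the client caching the DNS failure for good. A generic standalone sketch of that retry-with-re-resolution idea (the class, method, and fixed one-second backoff are illustrative assumptions, not Hadoop code):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.UnknownHostException;

public final class ReResolveOnRetrySketch {

  /**
   * Connect with retries, building a fresh InetSocketAddress on every
   * attempt so a transient DNS failure can heal on a later retry instead
   * of being cached as a permanently unresolved address.
   */
  static Socket connectWithRetries(String host, int port, int maxRetries,
      int timeoutMs) throws IOException, InterruptedException {
    IOException lastFailure = new UnknownHostException(host);
    for (int attempt = 0; attempt <= maxRetries; attempt++) {
      InetSocketAddress addr = new InetSocketAddress(host, port);
      if (addr.isUnresolved()) {
        // Resolution failed this time; back off and re-resolve on retry.
        lastFailure = new UnknownHostException(host);
        Thread.sleep(1000L);
        continue;
      }
      Socket socket = new Socket();
      try {
        socket.connect(addr, timeoutMs);
        return socket;
      } catch (IOException e) {
        socket.close();
        lastFailure = e;
        Thread.sleep(1000L);
      }
    }
    throw lastFailure;
  }

  private ReResolveOnRetrySketch() {
  }
}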

hadoop git commit: HADOOP-14301. Deprecate SharedInstanceProfileCredentialsProvider in branch-2. Contributed by Mingliang Liu.

2017-04-12 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 607295d21 -> 1bfb38229


HADOOP-14301. Deprecate SharedInstanceProfileCredentialsProvider in branch-2. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bfb3822
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bfb3822
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bfb3822

Branch: refs/heads/branch-2
Commit: 1bfb382296ab6f980341c926801b0eec57a68b87
Parents: 607295d
Author: Chris Nauroth 
Authored: Wed Apr 12 10:07:00 2017 -0700
Committer: Chris Nauroth 
Committed: Wed Apr 12 10:07:00 2017 -0700

--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 13 
 ...haredInstanceProfileCredentialsProvider.java |  9 +++--
 .../src/site/markdown/tools/hadoop-aws/index.md | 35 ++--
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 14 ++--
 4 files changed, 41 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bfb3822/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 6a11699..b652b3b 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -339,14 +339,15 @@ public final class S3AUtils {
   credentials.add(new BasicAWSCredentialsProvider(
   creds.getUser(), creds.getPassword()));
   credentials.add(new EnvironmentVariableCredentialsProvider());
-  credentials.add(
-  SharedInstanceProfileCredentialsProvider.getInstance());
+  credentials.add(InstanceProfileCredentialsProvider.getInstance());
 } else {
   for (Class aClass : awsClasses) {
-if (aClass == InstanceProfileCredentialsProvider.class) {
-  LOG.debug("Found {}, but will use {} instead.", aClass.getName(),
-  SharedInstanceProfileCredentialsProvider.class.getName());
-  aClass = SharedInstanceProfileCredentialsProvider.class;
+if (aClass == SharedInstanceProfileCredentialsProvider.class) {
+  LOG.warn("{} is deprecated and will be removed in future. " +
+  "Fall back to {} automatically.",
+  aClass.getName(),
+  InstanceProfileCredentialsProvider.class.getName());
+  aClass = InstanceProfileCredentialsProvider.class;
 }
 credentials.add(createAWSCredentialProvider(conf, aClass));
   }
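
For deployments that had configured the deprecated provider explicitly, the
same switch can be made in client configuration by naming the stock SDK class
in the S3A credential provider list. A minimal sketch (bucket name is a
placeholder; imports from org.apache.hadoop.conf, org.apache.hadoop.fs and
java.net.URI assumed):

    Configuration conf = new Configuration();
    // Use the AWS SDK's own shared instance-profile provider rather than the
    // deprecated Hadoop wrapper class.
    conf.set("fs.s3a.aws.credentials.provider",
        "com.amazonaws.auth.InstanceProfileCredentialsProvider");
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);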

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bfb3822/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
index cbc0787..d30444b 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
@@ -37,12 +37,15 @@ import org.apache.hadoop.classification.InterfaceStability;
  * {@link S3AFileSystem} connecting to different buckets, so sharing a 
singleton
  * instance is safe.
  *
- * As of AWS SDK 1.11.39, the SDK code internally enforces a singleton.  After
- * Hadoop upgrades to that version or higher, it's likely that we can remove
- * this class.
+ * As of AWS SDK 1.11.39, the SDK code internally enforces a singleton.  Hadoop
+ * has upgraded its dependency to 1.11.39+ so this class is deprecated. In
+ * next major version, this will be removed.
+ *
+ * @deprecated Please use {@link InstanceProfileCredentialsProvider} instead.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
+@Deprecated
 public final class SharedInstanceProfileCredentialsProvider
 extends InstanceProfileCredentialsProvider {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bfb3822/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 07cc903..4174141 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ 

hadoop git commit: HADOOP-14248. Retire SharedInstanceProfileCredentialsProvider in trunk. Contributed by Mingliang Liu.

2017-04-12 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/trunk a16ab2be9 -> b8305e6d0


HADOOP-14248. Retire SharedInstanceProfileCredentialsProvider in trunk. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8305e6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8305e6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8305e6d

Branch: refs/heads/trunk
Commit: b8305e6d06cbb9d44c728da14506d5cf825c12e5
Parents: a16ab2b
Author: Chris Nauroth 
Authored: Wed Apr 12 10:02:13 2017 -0700
Committer: Chris Nauroth 
Committed: Wed Apr 12 10:02:13 2017 -0700

--
 .../src/main/resources/core-default.xml |  9 +--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  8 +--
 ...haredInstanceProfileCredentialsProvider.java | 67 
 .../src/site/markdown/tools/hadoop-aws/index.md | 33 +++---
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  4 +-
 5 files changed, 13 insertions(+), 108 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8305e6d/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 03e4996..4f37c65 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -955,13 +955,8 @@
 configuration of AWS access key ID and secret access key in
 environment variables named AWS_ACCESS_KEY_ID and
 AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK.
-3. org.apache.hadoop.fs.s3a.SharedInstanceProfileCredentialsProvider:
-a shared instance of
-com.amazonaws.auth.InstanceProfileCredentialsProvider from the AWS
-SDK, which supports use of instance profile credentials if running
-in an EC2 VM.  Using this shared instance potentially reduces load
-on the EC2 instance metadata service for multi-threaded
-applications.
+3. com.amazonaws.auth.InstanceProfileCredentialsProvider: supports use
+of instance profile credentials if running in an EC2 VM.
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8305e6d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 6a11699..5ff9321 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -339,15 +339,9 @@ public final class S3AUtils {
   credentials.add(new BasicAWSCredentialsProvider(
   creds.getUser(), creds.getPassword()));
   credentials.add(new EnvironmentVariableCredentialsProvider());
-  credentials.add(
-  SharedInstanceProfileCredentialsProvider.getInstance());
+  credentials.add(InstanceProfileCredentialsProvider.getInstance());
 } else {
   for (Class aClass : awsClasses) {
-if (aClass == InstanceProfileCredentialsProvider.class) {
-  LOG.debug("Found {}, but will use {} instead.", aClass.getName(),
-  SharedInstanceProfileCredentialsProvider.class.getName());
-  aClass = SharedInstanceProfileCredentialsProvider.class;
-}
 credentials.add(createAWSCredentialProvider(conf, aClass));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8305e6d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
deleted file mode 100644
index cbc0787..000
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SharedInstanceProfileCredentialsProvider.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except 

hadoop git commit: HDFS-11163. Mover should move the file blocks to default storage once policy is unset. Contributed by Surendra Singh Lilhore.

2017-04-11 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/trunk 62e4573ef -> 23b1a7bdf


HDFS-11163. Mover should move the file blocks to default storage once policy is 
unset. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b1a7bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b1a7bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b1a7bd

Branch: refs/heads/trunk
Commit: 23b1a7bdf1b546c1e29d7010cf139b6d700461fc
Parents: 62e4573
Author: Chris Nauroth 
Authored: Tue Apr 11 15:01:49 2017 -0700
Committer: Chris Nauroth 
Committed: Tue Apr 11 22:03:09 2017 -0700

--
 .../org/apache/hadoop/fs/FsServerDefaults.java  | 19 -
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  4 +-
 .../src/main/proto/hdfs.proto   |  1 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 11 +++--
 .../hdfs/server/namenode/FSNamesystem.java  |  5 ++-
 .../apache/hadoop/hdfs/TestFileCreation.java|  2 +
 .../hadoop/hdfs/server/mover/TestMover.java | 42 
 7 files changed, 77 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b1a7bd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
index 469243c..84a40d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
@@ -55,6 +55,7 @@ public class FsServerDefaults implements Writable {
   private long trashInterval;
   private DataChecksum.Type checksumType;
   private String keyProviderUri;
+  private byte storagepolicyId;
 
   public FsServerDefaults() {
   }
@@ -62,8 +63,17 @@ public class FsServerDefaults implements Writable {
   public FsServerDefaults(long blockSize, int bytesPerChecksum,
   int writePacketSize, short replication, int fileBufferSize,
   boolean encryptDataTransfer, long trashInterval,
+  DataChecksum.Type checksumType, String keyProviderUri) {
+this(blockSize, bytesPerChecksum, writePacketSize, replication,
+fileBufferSize, encryptDataTransfer, trashInterval, checksumType,
+keyProviderUri, (byte) 0);
+  }
+
+  public FsServerDefaults(long blockSize, int bytesPerChecksum,
+  int writePacketSize, short replication, int fileBufferSize,
+  boolean encryptDataTransfer, long trashInterval,
   DataChecksum.Type checksumType,
-  String keyProviderUri) {
+  String keyProviderUri, byte storagepolicy) {
 this.blockSize = blockSize;
 this.bytesPerChecksum = bytesPerChecksum;
 this.writePacketSize = writePacketSize;
@@ -73,6 +83,7 @@ public class FsServerDefaults implements Writable {
 this.trashInterval = trashInterval;
 this.checksumType = checksumType;
 this.keyProviderUri = keyProviderUri;
+this.storagepolicyId = storagepolicy;
   }
 
   public long getBlockSize() {
@@ -115,6 +126,10 @@ public class FsServerDefaults implements Writable {
 return keyProviderUri;
   }
 
+  public byte getDefaultStoragePolicyId() {
+return storagepolicyId;
+  }
+
   // /
   // Writable
   // /
@@ -127,6 +142,7 @@ public class FsServerDefaults implements Writable {
 out.writeShort(replication);
 out.writeInt(fileBufferSize);
 WritableUtils.writeEnum(out, checksumType);
+out.writeByte(storagepolicyId);
   }
 
   @Override
@@ -138,5 +154,6 @@ public class FsServerDefaults implements Writable {
 replication = in.readShort();
 fileBufferSize = in.readInt();
 checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
+storagepolicyId = in.readByte();
   }
 }
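
With the storage policy id carried in the server defaults, a client can read
the NameNode's default policy without an extra RPC. A minimal sketch using the
new accessor (path and error handling omitted; assumes a FileSystem already
bound to HDFS):

    FileSystem fs = FileSystem.get(conf);
    // getServerDefaults(Path) returns the NameNode-advertised defaults, which
    // now include the default storage policy id added by this change.
    FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
    byte policyId = defaults.getDefaultStoragePolicyId();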

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b1a7bd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 125b53f..98d7ef9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 

[1/2] hadoop git commit: HDFS-11163. Mover should move the file blocks to default storage once policy is unset. Contributed by Surendra Singh Lilhore.

2017-04-11 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f1a85b2be -> d5e2bd409
  refs/heads/branch-2.8 32475a7b6 -> c4bf50439


HDFS-11163. Mover should move the file blocks to default storage once policy is 
unset. Contributed by Surendra Singh Lilhore.

(cherry picked from commit 00ed21a6fedb45a7c8992b8d45adaa83f14af34c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5e2bd40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5e2bd40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5e2bd40

Branch: refs/heads/branch-2
Commit: d5e2bd4096bf2b4d8a5a22042145a08905f93cd4
Parents: f1a85b2
Author: Chris Nauroth 
Authored: Tue Apr 11 15:01:49 2017 -0700
Committer: Chris Nauroth 
Committed: Tue Apr 11 21:18:47 2017 -0700

--
 .../org/apache/hadoop/fs/FsServerDefaults.java  | 21 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  4 +-
 .../src/main/proto/hdfs.proto   |  1 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 11 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |  5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java|  1 +
 .../hadoop/hdfs/server/mover/TestMover.java | 74 
 7 files changed, 109 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5e2bd40/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
index 469243c..9933e5d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
@@ -55,6 +55,7 @@ public class FsServerDefaults implements Writable {
   private long trashInterval;
   private DataChecksum.Type checksumType;
   private String keyProviderUri;
+  private byte storagepolicyId;
 
   public FsServerDefaults() {
   }
@@ -62,8 +63,17 @@ public class FsServerDefaults implements Writable {
   public FsServerDefaults(long blockSize, int bytesPerChecksum,
   int writePacketSize, short replication, int fileBufferSize,
   boolean encryptDataTransfer, long trashInterval,
-  DataChecksum.Type checksumType,
-  String keyProviderUri) {
+  DataChecksum.Type checksumType, String keyProviderUri) {
+this(blockSize, bytesPerChecksum, writePacketSize, replication,
+fileBufferSize, encryptDataTransfer, trashInterval, checksumType,
+keyProviderUri, (byte) 0);
+  }
+
+  public FsServerDefaults(long blockSize, int bytesPerChecksum,
+  int writePacketSize, short replication, int fileBufferSize,
+  boolean encryptDataTransfer, long trashInterval,
+  DataChecksum.Type checksumType, String keyProviderUri,
+  byte storagepolicy) {
 this.blockSize = blockSize;
 this.bytesPerChecksum = bytesPerChecksum;
 this.writePacketSize = writePacketSize;
@@ -73,6 +83,7 @@ public class FsServerDefaults implements Writable {
 this.trashInterval = trashInterval;
 this.checksumType = checksumType;
 this.keyProviderUri = keyProviderUri;
+this.storagepolicyId = storagepolicy;
   }
 
   public long getBlockSize() {
@@ -115,6 +126,10 @@ public class FsServerDefaults implements Writable {
 return keyProviderUri;
   }
 
+  public byte getDefaultStoragePolicyId() {
+return storagepolicyId;
+  }
+
   // /
   // Writable
   // /
@@ -127,6 +142,7 @@ public class FsServerDefaults implements Writable {
 out.writeShort(replication);
 out.writeInt(fileBufferSize);
 WritableUtils.writeEnum(out, checksumType);
+out.writeByte(storagepolicyId);
   }
 
   @Override
@@ -138,5 +154,6 @@ public class FsServerDefaults implements Writable {
 replication = in.readShort();
 fileBufferSize = in.readInt();
 checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
+storagepolicyId = in.readByte();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5e2bd40/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 110c819..29335e2 100644
--- 

[2/2] hadoop git commit: HDFS-11163. Mover should move the file blocks to default storage once policy is unset. Contributed by Surendra Singh Lilhore.

2017-04-11 Thread cnauroth
HDFS-11163. Mover should move the file blocks to default storage once policy is 
unset. Contributed by Surendra Singh Lilhore.

(cherry picked from commit 00ed21a6fedb45a7c8992b8d45adaa83f14af34c)
(cherry picked from commit d5e2bd4096bf2b4d8a5a22042145a08905f93cd4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4bf5043
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4bf5043
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4bf5043

Branch: refs/heads/branch-2.8
Commit: c4bf504395d0594c6496439bfe59d78a606e16de
Parents: 32475a7
Author: Chris Nauroth 
Authored: Tue Apr 11 15:01:49 2017 -0700
Committer: Chris Nauroth 
Committed: Tue Apr 11 21:55:28 2017 -0700

--
 .../org/apache/hadoop/fs/FsServerDefaults.java  | 21 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  4 +-
 .../src/main/proto/hdfs.proto   |  1 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 11 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |  5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java|  1 +
 .../hadoop/hdfs/server/mover/TestMover.java | 74 
 7 files changed, 109 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4bf5043/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
index 469243c..9933e5d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
@@ -55,6 +55,7 @@ public class FsServerDefaults implements Writable {
   private long trashInterval;
   private DataChecksum.Type checksumType;
   private String keyProviderUri;
+  private byte storagepolicyId;
 
   public FsServerDefaults() {
   }
@@ -62,8 +63,17 @@ public class FsServerDefaults implements Writable {
   public FsServerDefaults(long blockSize, int bytesPerChecksum,
   int writePacketSize, short replication, int fileBufferSize,
   boolean encryptDataTransfer, long trashInterval,
-  DataChecksum.Type checksumType,
-  String keyProviderUri) {
+  DataChecksum.Type checksumType, String keyProviderUri) {
+this(blockSize, bytesPerChecksum, writePacketSize, replication,
+fileBufferSize, encryptDataTransfer, trashInterval, checksumType,
+keyProviderUri, (byte) 0);
+  }
+
+  public FsServerDefaults(long blockSize, int bytesPerChecksum,
+  int writePacketSize, short replication, int fileBufferSize,
+  boolean encryptDataTransfer, long trashInterval,
+  DataChecksum.Type checksumType, String keyProviderUri,
+  byte storagepolicy) {
 this.blockSize = blockSize;
 this.bytesPerChecksum = bytesPerChecksum;
 this.writePacketSize = writePacketSize;
@@ -73,6 +83,7 @@ public class FsServerDefaults implements Writable {
 this.trashInterval = trashInterval;
 this.checksumType = checksumType;
 this.keyProviderUri = keyProviderUri;
+this.storagepolicyId = storagepolicy;
   }
 
   public long getBlockSize() {
@@ -115,6 +126,10 @@ public class FsServerDefaults implements Writable {
 return keyProviderUri;
   }
 
+  public byte getDefaultStoragePolicyId() {
+return storagepolicyId;
+  }
+
   // /
   // Writable
   // /
@@ -127,6 +142,7 @@ public class FsServerDefaults implements Writable {
 out.writeShort(replication);
 out.writeInt(fileBufferSize);
 WritableUtils.writeEnum(out, checksumType);
+out.writeByte(storagepolicyId);
   }
 
   @Override
@@ -138,5 +154,6 @@ public class FsServerDefaults implements Writable {
 replication = in.readShort();
 fileBufferSize = in.readInt();
 checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
+storagepolicyId = in.readByte();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4bf5043/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 42ac9b1..000e211 100644
--- 

hadoop git commit: HADOOP-13589. S3Guard: Allow execution of all S3A integration tests with S3Guard enabled. Contributed by Steve Loughran.

2017-01-18 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 f10114c14 -> 07487b459


HADOOP-13589. S3Guard: Allow execution of all S3A integration tests with 
S3Guard enabled. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07487b45
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07487b45
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07487b45

Branch: refs/heads/HADOOP-13345
Commit: 07487b459d03d66a0855f0cf3818a7503a4b34ae
Parents: f10114c
Author: Chris Nauroth 
Authored: Wed Jan 18 21:17:51 2017 -0800
Committer: Chris Nauroth 
Committed: Wed Jan 18 21:17:51 2017 -0800

--
 hadoop-tools/hadoop-aws/pom.xml | 59 
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  2 +
 .../fs/s3a/s3guard/LocalMetadataStore.java  | 10 
 .../fs/s3a/s3guard/NullMetadataStore.java   |  4 ++
 .../site/markdown/tools/hadoop-aws/s3guard.md   | 42 +-
 .../fs/contract/s3a/ITestS3AContractCreate.java | 14 +
 .../fs/contract/s3a/ITestS3AContractDelete.java | 14 +
 .../fs/contract/s3a/ITestS3AContractDistCp.java |  7 +++
 .../s3a/ITestS3AContractGetFileStatus.java  |  4 ++
 .../fs/contract/s3a/ITestS3AContractMkdir.java  | 14 +
 .../fs/contract/s3a/ITestS3AContractOpen.java   | 14 +
 .../fs/contract/s3a/ITestS3AContractRename.java | 13 +
 .../contract/s3a/ITestS3AContractRootDir.java   | 14 +
 .../fs/contract/s3a/ITestS3AContractSeek.java   | 14 +
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  | 13 +
 .../hadoop/fs/s3a/ITestS3ACredentialsInURL.java |  2 +-
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  | 11 
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 40 +
 .../ITestS3AFileContextStatistics.java  |  4 +-
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   |  2 +-
 20 files changed, 292 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07487b45/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index fa86b4c..09c278c 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -44,6 +44,11 @@
 
unset
 
 3600
+
+false
+
false
+
local
+
   
 
   
@@ -164,6 +169,11 @@
 
${fs.s3a.scale.test.huge.filesize}
 
${fs.s3a.scale.test.huge.partitionsize}
 
${fs.s3a.scale.test.timeout}
+
+
${fs.s3a.s3guard.test.enabled}
+
${fs.s3a.s3guard.test.authoritative}
+
${fs.s3a.s3guard.test.implementation}
+
   
   
   
@@ -203,6 +213,10 @@
 
${fs.s3a.scale.test.huge.filesize}
 
${fs.s3a.scale.test.huge.partitionsize}
 
${fs.s3a.scale.test.timeout}
+
+
${fs.s3a.s3guard.test.enabled}
+
${fs.s3a.s3guard.test.implementation}
+
${fs.s3a.s3guard.test.authoritative}
   
   
   
@@ -243,6 +257,10 @@
 
${fs.s3a.scale.test.enabled}
 
${fs.s3a.scale.test.huge.filesize}
 
${fs.s3a.scale.test.timeout}
+
+
${fs.s3a.s3guard.test.enabled}
+
${fs.s3a.s3guard.test.implementation}
+
${fs.s3a.s3guard.test.authoritative}
   
   
${fs.s3a.scale.test.timeout}
 
@@ -265,6 +283,47 @@
 true
   
 
+
+
+
+  s3guard
+  
+
+  s3guard
+
+  
+  
+true
+  
+
+
+
+
+  dynamo
+  
+
+  dynamo
+
+  
+  
+
dynamo
+  
+
+
+
+
+  non-auth
+  
+
+  auth
+
+  
+  
+
true
+  
+
+
   
 
   

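The profiles above are switched on by system properties of the same names, so a
full S3A integration test run with S3Guard enabled would look roughly like the
following (a sketch; see the s3guard.md changes in this commit for the
authoritative instructions):

    # S3Guard backed by the local metadata store
    mvn verify -Ds3guard

    # S3Guard backed by DynamoDB
    mvn verify -Ds3guard -Ddynamo
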
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07487b45/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 2154ea6..0621bbf 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2072,6 +2072,8 

hadoop git commit: HADOOP-13908. S3Guard: Existing tables may not be initialized correctly in DynamoDBMetadataStore. Contributed by Mingliang Liu.

2017-01-09 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 e3f20027f -> a5cc315db


HADOOP-13908. S3Guard: Existing tables may not be initialized correctly in 
DynamoDBMetadataStore. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5cc315d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5cc315d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5cc315d

Branch: refs/heads/HADOOP-13345
Commit: a5cc315dbef15e8f708663d45800fdc957797cf2
Parents: e3f2002
Author: Chris Nauroth 
Authored: Mon Jan 9 15:48:30 2017 -0800
Committer: Chris Nauroth 
Committed: Mon Jan 9 15:48:30 2017 -0800

--
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 83 
 .../s3a/s3guard/TestDynamoDBMetadataStore.java  | 27 ++-
 2 files changed, 73 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5cc315d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 45ecaff..6ff0ee1 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -190,10 +190,7 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
 // use the bucket as the DynamoDB table name if not specified in config
 tableName = conf.getTrimmed(S3GUARD_DDB_TABLE_NAME_KEY, bucket);
 
-// create the table unless it's explicitly told not to do so
-if (conf.getBoolean(S3GUARD_DDB_TABLE_CREATE_KEY, false)) {
-  createTable();
-}
+initTable();
   }
 
   /**
@@ -230,7 +227,7 @@ public class DynamoDBMetadataStore implements MetadataStore 
{
 dynamoDB = new DynamoDB(dynamoDBClient);
 region = dynamoDBClient.getEndpointPrefix();
 
-createTable();
+initTable();
   }
 
   @Override
@@ -510,46 +507,64 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
   /**
* Create a table if it does not exist and wait for it to become active.
*
-   * If a table with the intended name already exists, then it logs the
-   * {@link ResourceInUseException} and uses that table. The DynamoDB table
-   * creation API is asynchronous.  This method wait for the table to become
-   * active after sending the creation request, so overall, this method is
-   * synchronous, and the table is guaranteed to exist after this method
-   * returns successfully.
+   * If a table with the intended name already exists, then it uses that table.
+   * Otherwise, it will automatically create the table if the config
+   * {@link org.apache.hadoop.fs.s3a.Constants#S3GUARD_DDB_TABLE_CREATE_KEY} is
+   * enabled. The DynamoDB table creation API is asynchronous.  This method 
wait
+   * for the table to become active after sending the creation request, so
+   * overall, this method is synchronous, and the table is guaranteed to exist
+   * after this method returns successfully.
+   *
+   * @throws IOException if table does not exist and auto-creation is disabled;
+   * or any other I/O exception occurred.
*/
   @VisibleForTesting
-  void createTable() throws IOException {
+  void initTable() throws IOException {
 final ProvisionedThroughput capacity = new ProvisionedThroughput(
 conf.getLong(S3GUARD_DDB_TABLE_CAPACITY_READ_KEY,
 S3GUARD_DDB_TABLE_CAPACITY_READ_DEFAULT),
 conf.getLong(S3GUARD_DDB_TABLE_CAPACITY_WRITE_KEY,
 S3GUARD_DDB_TABLE_CAPACITY_WRITE_DEFAULT));
 
+table = dynamoDB.getTable(tableName);
 try {
-  LOG.info("Creating DynamoDB table {} in region {}", tableName, region);
-  table = dynamoDB.createTable(new CreateTableRequest()
-  .withTableName(tableName)
-  .withKeySchema(keySchema())
-  .withAttributeDefinitions(attributeDefinitions())
-  .withProvisionedThroughput(capacity));
-} catch (ResourceInUseException e) {
-  LOG.info("ResourceInUseException while creating DynamoDB table {} in "
-  + "region {}.  This may indicate that the table was created by "
-  + "another concurrent thread or process.",
-  tableName, region);
-  table = dynamoDB.getTable(tableName);
-}
+  try {
+table.describe();
+LOG.debug("Using existing DynamoDB table {} in region {}",
+tableName, region);
+  } catch (ResourceNotFoundException rnfe) {
+
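
The new initTable() flow is describe-first, create-only-if-enabled. A condensed
sketch of that pattern against the AWS SDK v1 document API (capacity and the
keySchema()/attributeDefinitions() helpers are as used elsewhere in this class;
the committed method also logs and translates SDK exceptions):

    Table table = dynamoDB.getTable(tableName);
    try {
      table.describe();                 // table exists: reuse it as-is
    } catch (ResourceNotFoundException e) {
      if (!conf.getBoolean(S3GUARD_DDB_TABLE_CREATE_KEY, false)) {
        throw new IOException("DynamoDB table " + tableName + " does not exist", e);
      }
      table = dynamoDB.createTable(new CreateTableRequest()
          .withTableName(tableName)
          .withKeySchema(keySchema())
          .withAttributeDefinitions(attributeDefinitions())
          .withProvisionedThroughput(capacity));
      try {
        table.waitForActive();          // table creation is asynchronous
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted waiting for " + tableName);
      }
    }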

hadoop git commit: HADOOP-13931. S3AGuard: Use BatchWriteItem in DynamoDBMetadataStore#put(). Contributed by Mingliang Liu.

2017-01-06 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 a412b1020 -> a1b47db40


HADOOP-13931. S3AGuard: Use BatchWriteItem in DynamoDBMetadataStore#put(). 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1b47db4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1b47db4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1b47db4

Branch: refs/heads/HADOOP-13345
Commit: a1b47db405832087c080a4c5743f2efaa620d566
Parents: a412b10
Author: Chris Nauroth 
Authored: Fri Jan 6 10:30:47 2017 -0800
Committer: Chris Nauroth 
Committed: Fri Jan 6 10:30:47 2017 -0800

--
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 51 
 .../hadoop/fs/s3a/s3guard/MetadataStore.java|  5 +-
 2 files changed, 34 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1b47db4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 89ce3c4..1c19625 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -413,9 +413,27 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
 
   @Override
   public void put(PathMetadata meta) throws IOException {
-checkPathMetadata(meta);
+// For a deeply nested path, this method will automatically create the full
+// ancestry and save respective item in DynamoDB table.
+// So after put operation, we maintain the invariant that if a path exists,
+// all its ancestors will also exist in the table.
+// For performance purpose, we generate the full paths to put and use batch
+// write item request to save the items.
 LOG.debug("Saving to table {} in region {}: {}", tableName, region, meta);
-innerPut(meta);
+processBatchWriteRequest(null, pathMetadataToItem(fullPathsToPut(meta)));
+  }
+
+  /**
+   * Helper method to get full path of ancestors that are nonexistent in table.
+   */
+  private Collection fullPathsToPut(PathMetadata meta)
+  throws IOException {
+checkPathMetadata(meta);
+final Collection metasToPut = new ArrayList<>();
+// root path is not persisted
+if (!meta.getFileStatus().getPath().isRoot()) {
+  metasToPut.add(meta);
+}
 
 // put all its ancestors if not present; as an optimization we return at 
its
 // first existent ancestor
@@ -427,34 +445,29 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
   final Item item = table.getItem(spec);
   if (item == null) {
 final S3AFileStatus status = new S3AFileStatus(false, path, username);
-innerPut(new PathMetadata(status));
+metasToPut.add(new PathMetadata(status));
 path = path.getParent();
   } else {
 break;
   }
 }
-  }
-
-  private void innerPut(PathMetadata meta) throws IOException {
-final Path path = meta.getFileStatus().getPath();
-if (path.isRoot()) {
-  LOG.debug("Root path / is not persisted");
-  return;
-}
-
-try {
-  table.putItem(pathMetadataToItem(meta));
-} catch (AmazonClientException e) {
-  throw translateException("put", path, e);
-}
+return metasToPut;
   }
 
   @Override
   public void put(DirListingMetadata meta) throws IOException {
 LOG.debug("Saving to table {} in region {}: {}", tableName, region, meta);
 
-for (PathMetadata pathMetadata : meta.getListing()) {
-  put(pathMetadata);
+// directory path
+final Collection metasToPut = fullPathsToPut(
+new PathMetadata(new S3AFileStatus(false, meta.getPath(), username)));
+// all children of the directory
+metasToPut.addAll(meta.getListing());
+
+try {
+  processBatchWriteRequest(null, pathMetadataToItem(metasToPut));
+} catch (AmazonClientException e) {
+  throw translateException("put", (String) null, e);
 }
   }
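
The ancestor handling in fullPathsToPut() is a plain walk up the Path parent
chain, stopping at the first entry already present in the table. An isolated
sketch of that walk (GetItemSpec construction simplified; the real code reuses
its existing key-translation helpers):

    // Add each missing ancestor as an empty directory entry; stop at the first
    // ancestor that already exists, since everything above it must exist too.
    // The root path itself is never persisted.
    Path path = meta.getFileStatus().getPath().getParent();
    while (path != null && !path.isRoot()) {
      Item item = table.getItem(
          new GetItemSpec().withPrimaryKey(pathToKey(path)));
      if (item != null) {
        break;
      }
      metasToPut.add(new PathMetadata(new S3AFileStatus(false, path, username)));
      path = path.getParent();
    }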
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1b47db4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/MetadataStore.java
index 6d3c440..5c611c2 100644
--- 

[2/3] hadoop git commit: HDFS-9483. Documentation does not cover use of "swebhdfs" as URL scheme for SSL-secured WebHDFS. Contributed by Surendra Singh Lilhore.

2017-01-05 Thread cnauroth
HDFS-9483. Documentation does not cover use of "swebhdfs" as URL scheme for 
SSL-secured WebHDFS. Contributed by Surendra Singh Lilhore.

(cherry picked from commit 4c8f9e130230457fc897ed7a2a09e14d078be90a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c61858e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c61858e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c61858e

Branch: refs/heads/branch-2
Commit: 9c61858ee754b167a90533375e85ca5bde3cdce1
Parents: 2048673
Author: Chris Nauroth 
Authored: Thu Jan 5 15:04:47 2017 -0800
Committer: Chris Nauroth 
Committed: Thu Jan 5 15:05:01 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 5 +
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm| 1 +
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c61858e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index d75787b..d03490f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -211,6 +211,11 @@ In the REST API, the prefix "`/webhdfs/v1`" is inserted in 
the path and a query
 
   http://:/webhdfs/v1/?op=...
 
+**Note** that if WebHDFS is secured with SSL, then the scheme should be 
"`swebhdfs://`".
+
+  swebhdfs://:/
+
+
 ### HDFS Configuration Options
 
 Below are the HDFS configuration options for WebHDFS.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c61858e/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index 6a70c38..d153485 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -419,6 +419,7 @@ $H3 Copying Between Versions of HDFS
   HftpFileSystem, as webhdfs is available for both read and write operations,
   DistCp can be run on both source and destination cluster.
   Remote cluster is specified as `webhdfs://:`.
+  (Use the "`swebhdfs://`" scheme when webhdfs is secured with SSL).
   When copying between same major versions of Hadoop cluster (e.g. between 2.X
   and 2.X), use hdfs protocol for better performance.
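
As a concrete example of the documented scheme, a copy whose source cluster
exposes WebHDFS over SSL could be invoked as below (host names are
placeholders, and 50470 is only the conventional 2.x NameNode HTTPS port):

    hadoop distcp swebhdfs://nn1.example.com:50470/source \
        hdfs://nn2.example.com:8020/target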
 





[1/3] hadoop git commit: HDFS-9483. Documentation does not cover use of "swebhdfs" as URL scheme for SSL-secured WebHDFS. Contributed by Surendra Singh Lilhore.

2017-01-05 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 20486730e -> 9c61858ee
  refs/heads/branch-2.8 162c6cc9f -> 15e89634a
  refs/heads/trunk 02766b6c2 -> 4c8f9e130


HDFS-9483. Documentation does not cover use of "swebhdfs" as URL scheme for 
SSL-secured WebHDFS. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c8f9e13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c8f9e13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c8f9e13

Branch: refs/heads/trunk
Commit: 4c8f9e130230457fc897ed7a2a09e14d078be90a
Parents: 02766b6
Author: Chris Nauroth 
Authored: Thu Jan 5 15:04:47 2017 -0800
Committer: Chris Nauroth 
Committed: Thu Jan 5 15:04:47 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 5 +
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm| 1 +
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c8f9e13/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 27fd13a..50b4360 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -211,6 +211,11 @@ In the REST API, the prefix "`/webhdfs/v1`" is inserted in 
the path and a query
 
   http://:/webhdfs/v1/?op=...
 
+**Note** that if WebHDFS is secured with SSL, then the scheme should be 
"`swebhdfs://`".
+
+  swebhdfs://:/
+
+
 ### HDFS Configuration Options
 
 Below are the HDFS configuration options for WebHDFS.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c8f9e13/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index fc26321..dbf0e8d 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -417,6 +417,7 @@ $H3 Copying Between Versions of HDFS
   HftpFileSystem, as webhdfs is available for both read and write operations,
   DistCp can be run on both source and destination cluster.
   Remote cluster is specified as `webhdfs://:`.
+  (Use the "`swebhdfs://`" scheme when webhdfs is secured with SSL).
   When copying between same major versions of Hadoop cluster (e.g. between 2.X
   and 2.X), use hdfs protocol for better performance.
 





[3/3] hadoop git commit: HDFS-9483. Documentation does not cover use of "swebhdfs" as URL scheme for SSL-secured WebHDFS. Contributed by Surendra Singh Lilhore.

2017-01-05 Thread cnauroth
HDFS-9483. Documentation does not cover use of "swebhdfs" as URL scheme for 
SSL-secured WebHDFS. Contributed by Surendra Singh Lilhore.

(cherry picked from commit 4c8f9e130230457fc897ed7a2a09e14d078be90a)
(cherry picked from commit 9c61858ee754b167a90533375e85ca5bde3cdce1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15e89634
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15e89634
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15e89634

Branch: refs/heads/branch-2.8
Commit: 15e89634a528eb0ce1f9f6394567ca1a2c24f769
Parents: 162c6cc
Author: Chris Nauroth 
Authored: Thu Jan 5 15:04:47 2017 -0800
Committer: Chris Nauroth 
Committed: Thu Jan 5 15:05:12 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md | 5 +
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm| 1 +
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15e89634/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 45f11d0..716cbec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -200,6 +200,11 @@ In the REST API, the prefix "`/webhdfs/v1`" is inserted in 
the path and a query
 
   http://:/webhdfs/v1/?op=...
 
+**Note** that if WebHDFS is secured with SSL, then the scheme should be 
"`swebhdfs://`".
+
+  swebhdfs://:/
+
+
 ### HDFS Configuration Options
 
 Below are the HDFS configuration options for WebHDFS.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15e89634/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index df9ec69..d2ed9554 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -418,6 +418,7 @@ $H3 Copying Between Versions of HDFS
   HftpFileSystem, as webhdfs is available for both read and write operations,
   DistCp can be run on both source and destination cluster.
   Remote cluster is specified as `webhdfs://:`.
+  (Use the "`swebhdfs://`" scheme when webhdfs is secured with SSL).
   When copying between same major versions of Hadoop cluster (e.g. between 2.X
   and 2.X), use hdfs protocol for better performance.
 





hadoop git commit: HADOOP-13934. S3Guard: DynamoDBMetadataStore#move() could be throwing exception due to BatchWriteItem limits. Contributed by Mingliang Liu.

2017-01-05 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 b27317189 -> a412b1020


HADOOP-13934. S3Guard: DynamoDBMetadataStore#move() could be throwing exception 
due to BatchWriteItem limits. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a412b102
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a412b102
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a412b102

Branch: refs/heads/HADOOP-13345
Commit: a412b102078e6b17abcff76b472ec121b15d4e05
Parents: b273171
Author: Chris Nauroth 
Authored: Thu Jan 5 13:09:05 2017 -0800
Committer: Chris Nauroth 
Committed: Thu Jan 5 13:09:05 2017 -0800

--
 .../org/apache/hadoop/fs/s3a/Constants.java |  7 ++
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 62 +--
 .../PathMetadataDynamoDBTranslation.java| 18 +++--
 .../s3a/s3guard/TestDynamoDBMetadataStore.java  | 80 ++--
 4 files changed, 150 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a412b102/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 4e7dfd6..c6acaaa 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -335,6 +335,13 @@ public final class Constants {
   public static final long S3GUARD_DDB_TABLE_CAPACITY_WRITE_DEFAULT = 100;
 
   /**
+   * The maximum put or delete requests per BatchWriteItem request.
+   *
+   * Refer to Amazon API reference for this limit.
+   */
+  public static final int S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT = 25;
+
+  /**
* V1 committer.
*/
   @InterfaceStability.Unstable
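
Since BatchWriteItem rejects requests carrying more than 25 put/delete items, a
caller has to shard its work set before issuing the batch (DynamoDB may also
hand back unprocessed items that need to be resubmitted). A generic sketch of
the sharding step (names are illustrative; the committed processBatchWriteRequest
does the equivalent splitting inline):

    // Split a list into chunks no larger than the BatchWriteItem limit.
    static <T> List<List<T>> toBatches(List<T> items, int limit) {
      List<List<T>> batches = new ArrayList<>();
      for (int i = 0; i < items.size(); i += limit) {
        batches.add(items.subList(i, Math.min(items.size(), i + limit)));
      }
      return batches;
    }

    // e.g. toBatches(allItems, S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT)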

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a412b102/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 439002b..89ce3c4 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.s3a.s3guard;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -31,6 +32,7 @@ import 
com.amazonaws.services.dynamodbv2.document.BatchWriteItemOutcome;
 import com.amazonaws.services.dynamodbv2.document.DynamoDB;
 import com.amazonaws.services.dynamodbv2.document.Item;
 import com.amazonaws.services.dynamodbv2.document.ItemCollection;
+import com.amazonaws.services.dynamodbv2.document.PrimaryKey;
 import com.amazonaws.services.dynamodbv2.document.QueryOutcome;
 import com.amazonaws.services.dynamodbv2.document.Table;
 import com.amazonaws.services.dynamodbv2.document.TableWriteItems;
@@ -344,20 +346,68 @@ public class DynamoDBMetadataStore implements 
MetadataStore {
   @Override
   public void move(Collection pathsToDelete,
   Collection pathsToCreate) throws IOException {
-final TableWriteItems writeItems = new TableWriteItems(tableName)
-.withItemsToPut(pathMetadataToItem(pathsToCreate))
-.withPrimaryKeysToDelete(pathToKey(pathsToDelete));
+if (pathsToDelete == null && pathsToCreate == null) {
+  return;
+}
+
+LOG.debug("Moving paths of table {} in region {}: {} paths to delete and 
{}"
++ " paths to create", tableName, region,
+pathsToDelete == null ? 0 : pathsToDelete.size(),
+pathsToCreate == null ? 0 : pathsToCreate.size());
+LOG.trace("move: pathsToDelete = {}, pathsToCreate = {}",
+pathsToDelete, pathsToCreate);
 try {
-  BatchWriteItemOutcome res = dynamoDB.batchWriteItem(writeItems);
+  processBatchWriteRequest(pathToKey(pathsToDelete),
+  pathMetadataToItem(pathsToCreate));
+} catch (AmazonClientException e) {
+  throw translateException("move", (String) null, e);
+}
+  }
 
+  /**
+   * Helper method to issue a batch write request to DynamoDB.
+   *
+   * Callers of this method should catch the {@link AmazonClientException} and
+   * translate it 

hadoop git commit: HADOOP-13922. Some modules have dependencies on hadoop-client jar removed by HADOOP-11804. Contributed by Sean Busbey.

2017-01-03 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/trunk 451efb08f -> ebdd2e03b


HADOOP-13922. Some modules have dependencies on hadoop-client jar removed by 
HADOOP-11804. Contributed by Sean Busbey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebdd2e03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebdd2e03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebdd2e03

Branch: refs/heads/trunk
Commit: ebdd2e03b7b8573cc3531958dbfda72cdbc277fd
Parents: 451efb0
Author: Chris Nauroth 
Authored: Tue Jan 3 13:04:50 2017 -0800
Committer: Chris Nauroth 
Committed: Tue Jan 3 13:16:06 2017 -0800

--
 hadoop-client-modules/hadoop-client/pom.xml | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebdd2e03/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index 0394cae..cc527bc 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -23,7 +23,6 @@
 
   hadoop-client
   3.0.0-alpha2-SNAPSHOT
-  pom
 
   Apache Hadoop Client aggregation pom with dependencies 
exposed
   Apache Hadoop Client Aggregator
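
Restoring jar packaging matters because downstream builds consume hadoop-client
as an ordinary jar artifact; with the stray pom packaging in place, a dependency
declaration like the following (version taken from the diff above) would no
longer bring a client jar onto the classpath:

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>3.0.0-alpha2-SNAPSHOT</version>
    </dependency>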





[3/3] hadoop git commit: HADOOP-13502. Split fs.contract.is-blobstore flag into more descriptive flags for use by contract tests. Contributed by Chris Nauroth.

2016-10-26 Thread cnauroth
HADOOP-13502. Split fs.contract.is-blobstore flag into more descriptive flags 
for use by contract tests. Contributed by Chris Nauroth.

(cherry picked from commit 1f8490a5bacd98d0d371447ada3b31f93ca40a4e)
(cherry picked from commit 082d69ee663d5c1a59f32c40b9fbde9996886de9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4df17809
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4df17809
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4df17809

Branch: refs/heads/branch-2.8
Commit: 4df1780991db8262dce64d7bfeae38b99751a42a
Parents: c940c68
Author: Chris Nauroth 
Authored: Wed Oct 26 08:57:29 2016 -0700
Committer: Chris Nauroth 
Committed: Wed Oct 26 08:57:46 2016 -0700

--
 .gitignore|  1 +
 .../fs/contract/AbstractContractCreateTest.java   | 18 +-
 .../hadoop/fs/contract/ContractOptions.java   | 18 ++
 .../src/test/resources/contract/ftp.xml   |  7 +--
 .../hadoop-aws/src/test/resources/contract/s3.xml | 10 ++
 .../src/test/resources/contract/s3a.xml   |  5 +
 .../src/test/resources/contract/s3n.xml   | 10 ++
 .../src/test/resources/contract/swift.xml | 10 ++
 8 files changed, 64 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4df17809/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 5925ec4..eb1fc96 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,4 +29,5 @@ yarnregistry.pdf
 hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
+hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4df17809/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index 9344225..84dc775 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -121,7 +121,7 @@ public abstract class AbstractContractCreateTest extends
 try {
   assertIsDirectory(path);
 } catch (AssertionError failure) {
-  if (isSupported(IS_BLOBSTORE)) {
+  if (isSupported(CREATE_OVERWRITES_DIRECTORY)) {
 // file/directory hack surfaces here
 throw new AssumptionViolatedException(failure.toString(), failure);
   }
@@ -137,10 +137,10 @@ public abstract class AbstractContractCreateTest extends
   FileStatus status = getFileSystem().getFileStatus(path);
 
   boolean isDir = status.isDirectory();
-  if (!isDir && isSupported(IS_BLOBSTORE)) {
-// object store: downgrade to a skip so that the failure is visible
-// in test results
-skip("Object store allows a file to overwrite a directory");
+  if (!isDir && isSupported(CREATE_OVERWRITES_DIRECTORY)) {
+// For some file systems, downgrade to a skip so that the failure is
+// visible in test results.
+skip("This Filesystem allows a file to overwrite a directory");
   }
   fail("write of file over dir succeeded");
 } catch (FileAlreadyExistsException expected) {
@@ -170,10 +170,10 @@ public abstract class AbstractContractCreateTest extends
1024)) {
   if (!getFileSystem().exists(path)) {
 
-if (isSupported(IS_BLOBSTORE)) {
-  // object store: downgrade to a skip so that the failure is visible
-  // in test results
-  skip("Filesystem is an object store and newly created files are not 
immediately visible");
+if (isSupported(CREATE_VISIBILITY_DELAYED)) {
+  // For some file systems, downgrade to a skip so that the failure is
+  // visible in test results.
+  skip("This Filesystem delays visibility of newly created files");
 }
 assertPathExists("expected path to be visible before anything written",
  path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4df17809/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java

[1/3] hadoop git commit: HADOOP-13502. Split fs.contract.is-blobstore flag into more descriptive flags for use by contract tests. Contributed by Chris Nauroth.

2016-10-26 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 67e01f721 -> 082d69ee6
  refs/heads/branch-2.8 c940c68c7 -> 4df178099
  refs/heads/trunk 9cad3e235 -> 1f8490a5b


HADOOP-13502. Split fs.contract.is-blobstore flag into more descriptive flags 
for use by contract tests. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f8490a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f8490a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f8490a5

Branch: refs/heads/trunk
Commit: 1f8490a5bacd98d0d371447ada3b31f93ca40a4e
Parents: 9cad3e2
Author: Chris Nauroth 
Authored: Wed Oct 26 08:55:42 2016 -0700
Committer: Chris Nauroth 
Committed: Wed Oct 26 08:55:42 2016 -0700

--
 .gitignore|  1 +
 .../fs/contract/AbstractContractCreateTest.java   | 18 +-
 .../hadoop/fs/contract/ContractOptions.java   | 18 ++
 .../src/test/resources/contract/ftp.xml   |  7 +--
 .../src/test/resources/contract/s3a.xml   |  5 +
 .../src/test/resources/contract/s3n.xml   | 10 ++
 .../src/test/resources/contract/swift.xml | 10 ++
 7 files changed, 54 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f8490a5/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 194862b..cbecfc0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,6 +30,7 @@ yarnregistry.pdf
 hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
+hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/
 hadoop-tools/hadoop-aliyun/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aliyun/src/test/resources/contract-test-options.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f8490a5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index 9344225..84dc775 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -121,7 +121,7 @@ public abstract class AbstractContractCreateTest extends
 try {
   assertIsDirectory(path);
 } catch (AssertionError failure) {
-  if (isSupported(IS_BLOBSTORE)) {
+  if (isSupported(CREATE_OVERWRITES_DIRECTORY)) {
 // file/directory hack surfaces here
 throw new AssumptionViolatedException(failure.toString(), failure);
   }
@@ -137,10 +137,10 @@ public abstract class AbstractContractCreateTest extends
   FileStatus status = getFileSystem().getFileStatus(path);
 
   boolean isDir = status.isDirectory();
-  if (!isDir && isSupported(IS_BLOBSTORE)) {
-// object store: downgrade to a skip so that the failure is visible
-// in test results
-skip("Object store allows a file to overwrite a directory");
+  if (!isDir && isSupported(CREATE_OVERWRITES_DIRECTORY)) {
+// For some file systems, downgrade to a skip so that the failure is
+// visible in test results.
+skip("This Filesystem allows a file to overwrite a directory");
   }
   fail("write of file over dir succeeded");
 } catch (FileAlreadyExistsException expected) {
@@ -170,10 +170,10 @@ public abstract class AbstractContractCreateTest extends
1024)) {
   if (!getFileSystem().exists(path)) {
 
-if (isSupported(IS_BLOBSTORE)) {
-  // object store: downgrade to a skip so that the failure is visible
-  // in test results
-  skip("Filesystem is an object store and newly created files are not 
immediately visible");
+if (isSupported(CREATE_VISIBILITY_DELAYED)) {
+  // For some file systems, downgrade to a skip so that the failure is
+  // visible in test results.
+  skip("This Filesystem delays visibility of newly created files");
 }
 assertPathExists("expected path to be visible before anything written",
  path);


[2/3] hadoop git commit: HADOOP-13502. Split fs.contract.is-blobstore flag into more descriptive flags for use by contract tests. Contributed by Chris Nauroth.

2016-10-26 Thread cnauroth
HADOOP-13502. Split fs.contract.is-blobstore flag into more descriptive flags 
for use by contract tests. Contributed by Chris Nauroth.

(cherry picked from commit 1f8490a5bacd98d0d371447ada3b31f93ca40a4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/082d69ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/082d69ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/082d69ee

Branch: refs/heads/branch-2
Commit: 082d69ee663d5c1a59f32c40b9fbde9996886de9
Parents: 67e01f7
Author: Chris Nauroth 
Authored: Wed Oct 26 08:57:29 2016 -0700
Committer: Chris Nauroth 
Committed: Wed Oct 26 08:57:29 2016 -0700

--
 .gitignore|  1 +
 .../fs/contract/AbstractContractCreateTest.java   | 18 +-
 .../hadoop/fs/contract/ContractOptions.java   | 18 ++
 .../src/test/resources/contract/ftp.xml   |  7 +--
 .../hadoop-aws/src/test/resources/contract/s3.xml | 10 ++
 .../src/test/resources/contract/s3a.xml   |  5 +
 .../src/test/resources/contract/s3n.xml   | 10 ++
 .../src/test/resources/contract/swift.xml | 10 ++
 8 files changed, 64 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/082d69ee/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 5925ec4..eb1fc96 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,4 +29,5 @@ yarnregistry.pdf
 hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
+hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/082d69ee/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index 9344225..84dc775 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -121,7 +121,7 @@ public abstract class AbstractContractCreateTest extends
 try {
   assertIsDirectory(path);
 } catch (AssertionError failure) {
-  if (isSupported(IS_BLOBSTORE)) {
+  if (isSupported(CREATE_OVERWRITES_DIRECTORY)) {
 // file/directory hack surfaces here
 throw new AssumptionViolatedException(failure.toString(), failure);
   }
@@ -137,10 +137,10 @@ public abstract class AbstractContractCreateTest extends
   FileStatus status = getFileSystem().getFileStatus(path);
 
   boolean isDir = status.isDirectory();
-  if (!isDir && isSupported(IS_BLOBSTORE)) {
-// object store: downgrade to a skip so that the failure is visible
-// in test results
-skip("Object store allows a file to overwrite a directory");
+  if (!isDir && isSupported(CREATE_OVERWRITES_DIRECTORY)) {
+// For some file systems, downgrade to a skip so that the failure is
+// visible in test results.
+skip("This Filesystem allows a file to overwrite a directory");
   }
   fail("write of file over dir succeeded");
 } catch (FileAlreadyExistsException expected) {
@@ -170,10 +170,10 @@ public abstract class AbstractContractCreateTest extends
1024)) {
   if (!getFileSystem().exists(path)) {
 
-if (isSupported(IS_BLOBSTORE)) {
-  // object store: downgrade to a skip so that the failure is visible
-  // in test results
-  skip("Filesystem is an object store and newly created files are not 
immediately visible");
+if (isSupported(CREATE_VISIBILITY_DELAYED)) {
+  // For some file systems, downgrade to a skip so that the failure is
+  // visible in test results.
+  skip("This Filesystem delays visibility of newly created files");
 }
 assertPathExists("expected path to be visible before anything written",
  path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/082d69ee/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
--
diff --git 

[1/3] hadoop git commit: HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test runs down. Contributed by Steve Loughran.

2016-10-26 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0cd43dd2b -> 67e01f721
  refs/heads/branch-2.8 e7bd2e8d6 -> c940c68c7
  refs/heads/trunk e90af4a89 -> 9cad3e235


HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test 
runs down. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cad3e23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cad3e23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cad3e23

Branch: refs/heads/trunk
Commit: 9cad3e235026dbe4658705ca85d263d0edf14521
Parents: e90af4a
Author: Chris Nauroth 
Authored: Wed Oct 26 08:27:26 2016 -0700
Committer: Chris Nauroth 
Committed: Wed Oct 26 08:27:26 2016 -0700

--
 .../hadoop/fs/contract/ContractTestUtils.java   |   6 +
 .../TestFSMainOperationsLocalFileSystem.java|   4 +-
 hadoop-tools/hadoop-aws/pom.xml |   7 -
 .../fs/contract/s3a/ITestS3AContractDistCp.java |   6 +
 .../hadoop/fs/contract/s3a/S3AContract.java |   6 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |  26 +++-
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |  82 ---
 .../apache/hadoop/fs/s3a/ITestS3ABlocksize.java |  19 +--
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  23 +--
 .../hadoop/fs/s3a/ITestS3AEncryption.java   |   9 +-
 .../ITestS3AEncryptionAlgorithmPropagation.java |   7 -
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  11 +-
 .../fs/s3a/ITestS3AFileOperationCost.java   |  19 +--
 .../fs/s3a/ITestS3AFileSystemContract.java  |  33 -
 .../fs/s3a/ITestS3ATemporaryCredentials.java|  14 +-
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  27 +++-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  16 ++-
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java |  25 ++--
 .../s3a/scale/ITestS3ADeleteFilesOneByOne.java  |  12 +-
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  13 +-
 .../s3a/scale/ITestS3ADirectoryPerformance.java |  16 ++-
 .../scale/ITestS3AHugeFilesClassicOutput.java   |   4 +-
 .../scale/ITestS3AInputStreamPerformance.java   |   3 +-
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 139 +--
 .../org/apache/hadoop/fs/s3a/yarn/ITestS3A.java |   4 +-
 .../fs/s3a/yarn/ITestS3AMiniYarnCluster.java|  50 +++
 26 files changed, 255 insertions(+), 326 deletions(-)
--
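
Among the changes above, ContractTestUtils gains simple timing and bandwidth reporting around its bulk read and write helpers (see the hunks later in this message). A minimal standalone sketch of that measurement pattern (an illustration only, not the NanoTimer class itself):

// Time a transfer and report throughput; the sleep stands in for the I/O work.
public final class BandwidthSketch {
  public static void main(String[] args) throws Exception {
    long bytes = 8L * 1024 * 1024;           // size of the data being moved
    long start = System.nanoTime();
    Thread.sleep(50);                        // placeholder for the read/write work
    long elapsedNanos = System.nanoTime() - start;
    double seconds = elapsedNanos / 1.0e9;
    double megabytesPerSecond = (bytes / (1024.0 * 1024.0)) / seconds;
    System.out.printf("Time to transfer %d bytes: %,d ns (%.3f MB/s)%n",
        bytes, elapsedNanos, megabytesPerSecond);
  }
}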


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cad3e23/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 73c8f1c..f6b6389 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -834,6 +834,7 @@ public class ContractTestUtils extends Assert {
 
 long totalBytesRead = 0;
 int nextExpectedNumber = 0;
+NanoTimer timer = new NanoTimer();
 try (InputStream inputStream = fs.open(path)) {
   while (true) {
 final int bytesRead = inputStream.read(testBuffer);
@@ -862,6 +863,8 @@ public class ContractTestUtils extends Assert {
 " bytes but only received " + totalBytesRead);
   }
 }
+timer.end("Time to read %d bytes", expectedSize);
+bandwidth(timer, expectedSize);
   }
 
   /**
@@ -925,9 +928,12 @@ public class ContractTestUtils extends Assert {
 final Path objectPath = new Path(parent, objectName);
 
 // Write test file in a specific pattern
+NanoTimer timer = new NanoTimer();
 assertEquals(fileSize,
 generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
 assertPathExists(fs, "not created successful", objectPath);
+timer.end("Time to write %d bytes", fileSize);
+bandwidth(timer, fileSize);
 
 // Now read the same file back and verify its content
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cad3e23/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
index 6081f38..12687fd 100644
--- 

[2/3] hadoop git commit: HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test runs down. Contributed by Steve Loughran.

2016-10-26 Thread cnauroth
HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test 
runs down. Contributed by Steve Loughran.

(cherry picked from commit 9cad3e235026dbe4658705ca85d263d0edf14521)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67e01f72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67e01f72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67e01f72

Branch: refs/heads/branch-2
Commit: 67e01f7218e592d7b18316d65f7b22ae8b9ad7a6
Parents: 0cd43dd
Author: Chris Nauroth 
Authored: Wed Oct 26 08:27:26 2016 -0700
Committer: Chris Nauroth 
Committed: Wed Oct 26 08:27:33 2016 -0700

--
 .../hadoop/fs/contract/ContractTestUtils.java   |   6 +
 .../TestFSMainOperationsLocalFileSystem.java|   4 +-
 hadoop-tools/hadoop-aws/pom.xml |   7 -
 .../fs/contract/s3a/ITestS3AContractDistCp.java |   6 +
 .../hadoop/fs/contract/s3a/S3AContract.java |   6 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |  26 +++-
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |  82 ---
 .../apache/hadoop/fs/s3a/ITestS3ABlocksize.java |  19 +--
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  23 +--
 .../hadoop/fs/s3a/ITestS3AEncryption.java   |   9 +-
 .../ITestS3AEncryptionAlgorithmPropagation.java |   7 -
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  11 +-
 .../fs/s3a/ITestS3AFileOperationCost.java   |  19 +--
 .../fs/s3a/ITestS3AFileSystemContract.java  |  33 -
 .../fs/s3a/ITestS3ATemporaryCredentials.java|  14 +-
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  27 +++-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  16 ++-
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java |  25 ++--
 .../s3a/scale/ITestS3ADeleteFilesOneByOne.java  |  12 +-
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  13 +-
 .../s3a/scale/ITestS3ADirectoryPerformance.java |  16 ++-
 .../scale/ITestS3AHugeFilesClassicOutput.java   |   4 +-
 .../scale/ITestS3AInputStreamPerformance.java   |   3 +-
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 139 +--
 .../org/apache/hadoop/fs/s3a/yarn/ITestS3A.java |   4 +-
 .../fs/s3a/yarn/ITestS3AMiniYarnCluster.java|  50 +++
 26 files changed, 255 insertions(+), 326 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67e01f72/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 73c8f1c..f6b6389 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -834,6 +834,7 @@ public class ContractTestUtils extends Assert {
 
 long totalBytesRead = 0;
 int nextExpectedNumber = 0;
+NanoTimer timer = new NanoTimer();
 try (InputStream inputStream = fs.open(path)) {
   while (true) {
 final int bytesRead = inputStream.read(testBuffer);
@@ -862,6 +863,8 @@ public class ContractTestUtils extends Assert {
 " bytes but only received " + totalBytesRead);
   }
 }
+timer.end("Time to read %d bytes", expectedSize);
+bandwidth(timer, expectedSize);
   }
 
   /**
@@ -925,9 +928,12 @@ public class ContractTestUtils extends Assert {
 final Path objectPath = new Path(parent, objectName);
 
 // Write test file in a specific pattern
+NanoTimer timer = new NanoTimer();
 assertEquals(fileSize,
 generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
 assertPathExists(fs, "not created successful", objectPath);
+timer.end("Time to write %d bytes", fileSize);
+bandwidth(timer, fileSize);
 
 // Now read the same file back and verify its content
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67e01f72/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
index 6081f38..12687fd 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
+++ 

[3/3] hadoop git commit: HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test runs down. Contributed by Steve Loughran.

2016-10-26 Thread cnauroth
HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test 
runs down. Contributed by Steve Loughran.

(cherry picked from commit 9cad3e235026dbe4658705ca85d263d0edf14521)
(cherry picked from commit 67e01f7218e592d7b18316d65f7b22ae8b9ad7a6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c940c68c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c940c68c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c940c68c

Branch: refs/heads/branch-2.8
Commit: c940c68c79f2995c8a32f239bbe0288a63c317ed
Parents: e7bd2e8
Author: Chris Nauroth 
Authored: Wed Oct 26 08:27:26 2016 -0700
Committer: Chris Nauroth 
Committed: Wed Oct 26 08:27:39 2016 -0700

--
 .../hadoop/fs/contract/ContractTestUtils.java   |   6 +
 .../TestFSMainOperationsLocalFileSystem.java|   4 +-
 hadoop-tools/hadoop-aws/pom.xml |   7 -
 .../fs/contract/s3a/ITestS3AContractDistCp.java |   6 +
 .../hadoop/fs/contract/s3a/S3AContract.java |   6 +-
 .../hadoop/fs/s3a/AbstractS3ATestBase.java  |  26 +++-
 .../fs/s3a/ITestS3ABlockingThreadPool.java  |  82 ---
 .../apache/hadoop/fs/s3a/ITestS3ABlocksize.java |  19 +--
 .../hadoop/fs/s3a/ITestS3AConfiguration.java|  23 +--
 .../hadoop/fs/s3a/ITestS3AEncryption.java   |   9 +-
 .../ITestS3AEncryptionAlgorithmPropagation.java |   7 -
 .../hadoop/fs/s3a/ITestS3AFailureHandling.java  |  11 +-
 .../fs/s3a/ITestS3AFileOperationCost.java   |  19 +--
 .../fs/s3a/ITestS3AFileSystemContract.java  |  33 -
 .../fs/s3a/ITestS3ATemporaryCredentials.java|  14 +-
 .../apache/hadoop/fs/s3a/S3ATestConstants.java  |  27 +++-
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  16 ++-
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java |  25 ++--
 .../s3a/scale/ITestS3ADeleteFilesOneByOne.java  |  12 +-
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java   |  13 +-
 .../s3a/scale/ITestS3ADirectoryPerformance.java |  16 ++-
 .../scale/ITestS3AHugeFilesClassicOutput.java   |   4 +-
 .../scale/ITestS3AInputStreamPerformance.java   |   3 +-
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java   | 139 +--
 .../org/apache/hadoop/fs/s3a/yarn/ITestS3A.java |   4 +-
 .../fs/s3a/yarn/ITestS3AMiniYarnCluster.java|  50 +++
 26 files changed, 255 insertions(+), 326 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c940c68c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index 73c8f1c..f6b6389 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -834,6 +834,7 @@ public class ContractTestUtils extends Assert {
 
 long totalBytesRead = 0;
 int nextExpectedNumber = 0;
+NanoTimer timer = new NanoTimer();
 try (InputStream inputStream = fs.open(path)) {
   while (true) {
 final int bytesRead = inputStream.read(testBuffer);
@@ -862,6 +863,8 @@ public class ContractTestUtils extends Assert {
 " bytes but only received " + totalBytesRead);
   }
 }
+timer.end("Time to read %d bytes", expectedSize);
+bandwidth(timer, expectedSize);
   }
 
   /**
@@ -925,9 +928,12 @@ public class ContractTestUtils extends Assert {
 final Path objectPath = new Path(parent, objectName);
 
 // Write test file in a specific pattern
+NanoTimer timer = new NanoTimer();
 assertEquals(fileSize,
 generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
 assertPathExists(fs, "not created successful", objectPath);
+timer.end("Time to write %d bytes", fileSize);
+bandwidth(timer, fileSize);
 
 // Now read the same file back and verify its content
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c940c68c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
index 6081f38..12687fd 100644
--- 

[2/3] hadoop git commit: HADOOP-13309. Document S3A known limitations in file ownership and permission model. Contributed by Chris Nauroth.

2016-10-25 Thread cnauroth
HADOOP-13309. Document S3A known limitations in file ownership and permission 
model. Contributed by Chris Nauroth.

(cherry picked from commit 309a43925c078ff51cdb6bd1273e6f91f43311cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05d772e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05d772e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05d772e2

Branch: refs/heads/branch-2
Commit: 05d772e297fa3738b3fdddfa666bd6e23688f2b6
Parents: b913b62
Author: Chris Nauroth 
Authored: Tue Oct 25 09:03:03 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Oct 25 09:03:11 2016 -0700

--
 .../site/markdown/filesystem/introduction.md| 15 +
 .../src/site/markdown/tools/hadoop-aws/index.md | 34 +---
 2 files changed, 44 insertions(+), 5 deletions(-)
--
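
The documentation added below recommends stub values when an object store cannot persist ownership or permissions. A minimal sketch of what a client would then observe from FileStatus under that convention (illustrative values only, not taken from the S3A implementation):

// Illustrates the documented stub convention: owner/group = current user,
// 777 for directories, 666 for files. Sketch, not S3A code.
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;

public class StubStatusSketch {
  public static void main(String[] args) throws Exception {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    FileStatus file = new FileStatus(0, false, 1, 0, 0, 0,
        new FsPermission((short) 0666), user, user, new Path("/example/file"));
    FileStatus dir = new FileStatus(0, true, 1, 0, 0, 0,
        new FsPermission((short) 0777), user, user, new Path("/example/dir"));
    System.out.println(file.getPermission() + " " + file.getOwner());
    System.out.println(dir.getPermission() + " " + dir.getGroup());
  }
}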


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d772e2/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
index 22da54c..194fa15 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
@@ -373,6 +373,21 @@ a time proportional to the quantity of data to upload, and 
inversely proportiona
 to the network bandwidth. It may also fail a failure that is better
 escalated than ignored.
 
+1. **Authorization**. Hadoop uses the `FileStatus` class to
+represent core metadata of files and directories, including the owner, group 
and
+permissions.  Object stores might not have a viable way to persist this
+metadata, so they might need to populate `FileStatus` with stub values.  Even 
if
+the object store persists this metadata, it still might not be feasible for the
+object store to enforce file authorization in the same way as a traditional 
file
+system.  If the object store cannot persist this metadata, then the recommended
+convention is:
+* File owner is reported as the current user.
+* File group also is reported as the current user.
+* Directory permissions are reported as 777.
+* File permissions are reported as 666.
+* File system APIs that set ownership and permissions execute successfully
+  without error, but they are no-ops.
+
 Object stores with these characteristics, can not be used as a direct 
replacement
 for HDFS. In terms of this specification, their implementations of the
 specified operations do not match those required. They are considered supported

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d772e2/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index a37882f..fe4f972 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -36,7 +36,7 @@ higher performance.
 
 The specifics of using these filesystems are documented below.
 
-### Warning #1: Object Stores are not filesystems.
+### Warning #1: Object Stores are not filesystems
 
 Amazon S3 is an example of "an object store". In order to achieve scalability
 and especially high availability, S3 has —as many other cloud object stores 
have
@@ -53,14 +53,38 @@ recursive file-by-file operations. They take time at least 
proportional to
 the number of files, during which time partial updates may be visible. If
 the operations are interrupted, the filesystem is left in an intermediate 
state.
 
-### Warning #2: Because Object stores don't track modification times of 
directories,
-features of Hadoop relying on this can have unexpected behaviour. E.g. the
+### Warning #2: Object stores don't track modification times of directories
+
+Features of Hadoop relying on this can have unexpected behaviour. E.g. the
 AggregatedLogDeletionService of YARN will not remove the appropriate logfiles.
 
 For further discussion on these topics, please consult
 [The Hadoop FileSystem API 
Definition](../../../hadoop-project-dist/hadoop-common/filesystem/index.html).
 
-### Warning #3: your AWS credentials are valuable
+### Warning #3: Object stores have different authorization models
+
+The object authorization model of S3 is much different from the file
+authorization model of HDFS and traditional file systems.  It is not feasible 
to
+persist file 

[1/3] hadoop git commit: HADOOP-13309. Document S3A known limitations in file ownership and permission model. Contributed by Chris Nauroth.

2016-10-25 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b913b62aa -> 05d772e29
  refs/heads/branch-2.8 444312b54 -> 3d5f41544
  refs/heads/trunk dbd205762 -> 309a43925


HADOOP-13309. Document S3A known limitations in file ownership and permission 
model. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/309a4392
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/309a4392
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/309a4392

Branch: refs/heads/trunk
Commit: 309a43925c078ff51cdb6bd1273e6f91f43311cb
Parents: dbd2057
Author: Chris Nauroth 
Authored: Tue Oct 25 09:03:03 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Oct 25 09:03:03 2016 -0700

--
 .../site/markdown/filesystem/introduction.md| 15 +
 .../src/site/markdown/tools/hadoop-aws/index.md | 34 +---
 2 files changed, 44 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/309a4392/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
index 22da54c..194fa15 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
@@ -373,6 +373,21 @@ a time proportional to the quantity of data to upload, and 
inversely proportiona
 to the network bandwidth. It may also fail a failure that is better
 escalated than ignored.
 
+1. **Authorization**. Hadoop uses the `FileStatus` class to
+represent core metadata of files and directories, including the owner, group 
and
+permissions.  Object stores might not have a viable way to persist this
+metadata, so they might need to populate `FileStatus` with stub values.  Even 
if
+the object store persists this metadata, it still might not be feasible for the
+object store to enforce file authorization in the same way as a traditional 
file
+system.  If the object store cannot persist this metadata, then the recommended
+convention is:
+* File owner is reported as the current user.
+* File group also is reported as the current user.
+* Directory permissions are reported as 777.
+* File permissions are reported as 666.
+* File system APIs that set ownership and permissions execute successfully
+  without error, but they are no-ops.
+
 Object stores with these characteristics, can not be used as a direct 
replacement
 for HDFS. In terms of this specification, their implementations of the
 specified operations do not match those required. They are considered supported

http://git-wip-us.apache.org/repos/asf/hadoop/blob/309a4392/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index c0d9157..0eb36ef 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -39,7 +39,7 @@ higher performance.
 
 The specifics of using these filesystems are documented below.
 
-### Warning #1: Object Stores are not filesystems.
+### Warning #1: Object Stores are not filesystems
 
 Amazon S3 is an example of "an object store". In order to achieve scalability
 and especially high availability, S3 has —as many other cloud object stores 
have
@@ -56,14 +56,38 @@ recursive file-by-file operations. They take time at least 
proportional to
 the number of files, during which time partial updates may be visible. If
 the operations are interrupted, the filesystem is left in an intermediate 
state.
 
-### Warning #2: Because Object stores don't track modification times of 
directories,
-features of Hadoop relying on this can have unexpected behaviour. E.g. the
+### Warning #2: Object stores don't track modification times of directories
+
+Features of Hadoop relying on this can have unexpected behaviour. E.g. the
 AggregatedLogDeletionService of YARN will not remove the appropriate logfiles.
 
 For further discussion on these topics, please consult
 [The Hadoop FileSystem API 
Definition](../../../hadoop-project-dist/hadoop-common/filesystem/index.html).
 
-### Warning #3: your AWS credentials are valuable
+### Warning #3: Object stores have different authorization models
+
+The object authorization model of S3 is much different from the file

[3/3] hadoop git commit: HADOOP-13309. Document S3A known limitations in file ownership and permission model. Contributed by Chris Nauroth.

2016-10-25 Thread cnauroth
HADOOP-13309. Document S3A known limitations in file ownership and permission 
model. Contributed by Chris Nauroth.

(cherry picked from commit 309a43925c078ff51cdb6bd1273e6f91f43311cb)
(cherry picked from commit 05d772e297fa3738b3fdddfa666bd6e23688f2b6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d5f4154
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d5f4154
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d5f4154

Branch: refs/heads/branch-2.8
Commit: 3d5f41544a4fcd8f4760253f5dbfbee15b1a65b6
Parents: 444312b
Author: Chris Nauroth 
Authored: Tue Oct 25 09:03:03 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Oct 25 09:03:19 2016 -0700

--
 .../site/markdown/filesystem/introduction.md| 15 +
 .../src/site/markdown/tools/hadoop-aws/index.md | 34 +---
 2 files changed, 44 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d5f4154/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
index 22da54c..194fa15 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
@@ -373,6 +373,21 @@ a time proportional to the quantity of data to upload, and 
inversely proportiona
 to the network bandwidth. It may also fail a failure that is better
 escalated than ignored.
 
+1. **Authorization**. Hadoop uses the `FileStatus` class to
+represent core metadata of files and directories, including the owner, group 
and
+permissions.  Object stores might not have a viable way to persist this
+metadata, so they might need to populate `FileStatus` with stub values.  Even 
if
+the object store persists this metadata, it still might not be feasible for the
+object store to enforce file authorization in the same way as a traditional 
file
+system.  If the object store cannot persist this metadata, then the recommended
+convention is:
+* File owner is reported as the current user.
+* File group also is reported as the current user.
+* Directory permissions are reported as 777.
+* File permissions are reported as 666.
+* File system APIs that set ownership and permissions execute successfully
+  without error, but they are no-ops.
+
 Object stores with these characteristics, can not be used as a direct 
replacement
 for HDFS. In terms of this specification, their implementations of the
 specified operations do not match those required. They are considered supported

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d5f4154/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index a37882f..fe4f972 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -36,7 +36,7 @@ higher performance.
 
 The specifics of using these filesystems are documented below.
 
-### Warning #1: Object Stores are not filesystems.
+### Warning #1: Object Stores are not filesystems
 
 Amazon S3 is an example of "an object store". In order to achieve scalability
 and especially high availability, S3 has —as many other cloud object stores 
have
@@ -53,14 +53,38 @@ recursive file-by-file operations. They take time at least 
proportional to
 the number of files, during which time partial updates may be visible. If
 the operations are interrupted, the filesystem is left in an intermediate 
state.
 
-### Warning #2: Because Object stores don't track modification times of 
directories,
-features of Hadoop relying on this can have unexpected behaviour. E.g. the
+### Warning #2: Object stores don't track modification times of directories
+
+Features of Hadoop relying on this can have unexpected behaviour. E.g. the
 AggregatedLogDeletionService of YARN will not remove the appropriate logfiles.
 
 For further discussion on these topics, please consult
 [The Hadoop FileSystem API 
Definition](../../../hadoop-project-dist/hadoop-common/filesystem/index.html).
 
-### Warning #3: your AWS credentials are valuable
+### Warning #3: Object stores have different authorization models
+
+The object authorization model of S3 is much different from the file
+authorization model of HDFS 

[3/3] hadoop git commit: HADOOP-12774. s3a should use UGI.getCurrentUser.getShortname() for username. Contributed by Steve Loughran.

2016-10-24 Thread cnauroth
HADOOP-12774. s3a should use UGI.getCurrentUser.getShortname() for username. 
Contributed by Steve Loughran.

(cherry picked from commit 3372e940303149d6258e0b72c54d72f080f0daa2)
(cherry picked from commit 5c2f67bdae208ea14c518af58e551d563b2bf8cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02f0472d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02f0472d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02f0472d

Branch: refs/heads/branch-2.8
Commit: 02f0472d1ea0c7a28c2ee1471408c433790f05d2
Parents: 78970e0
Author: Chris Nauroth 
Authored: Mon Oct 24 21:54:06 2016 -0700
Committer: Chris Nauroth 
Committed: Mon Oct 24 21:54:25 2016 -0700

--
 .../java/org/apache/hadoop/fs/s3a/Listing.java  |  5 +--
 .../org/apache/hadoop/fs/s3a/S3AFileStatus.java | 33 ++--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 29 -
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  8 +++--
 .../hadoop/fs/s3a/ITestS3AConfiguration.java| 23 +-
 5 files changed, 74 insertions(+), 24 deletions(-)
--
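
The core of the change is that the reported owner is resolved once from the Hadoop login rather than overriding getOwner() with System.getProperty("user.name") on every call. A minimal sketch of that lookup (illustrative, not the S3A initialization code):

// Resolve the short name of the current Hadoop user; this is the value an
// S3A-style status would report as both owner and group. Sketch only.
import org.apache.hadoop.security.UserGroupInformation;

public class UsernameSketch {
  public static void main(String[] args) throws java.io.IOException {
    String shortName = UserGroupInformation.getCurrentUser().getShortUserName();
    System.out.println("Reported owner/group: " + shortName);
  }
}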


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02f0472d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
index 4120b20..30d8e6f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
@@ -298,7 +298,7 @@ public class Listing {
 // Skip over keys that are ourselves and old S3N _$folder$ files
 if (acceptor.accept(keyPath, summary) && filter.accept(keyPath)) {
   FileStatus status = createFileStatus(keyPath, summary,
-  owner.getDefaultBlockSize(keyPath));
+  owner.getDefaultBlockSize(keyPath), owner.getUsername());
   LOG.debug("Adding: {}", status);
   stats.add(status);
   added++;
@@ -312,7 +312,8 @@ public class Listing {
   for (String prefix : objects.getCommonPrefixes()) {
 Path keyPath = owner.keyToQualifiedPath(prefix);
 if (acceptor.accept(keyPath, prefix) && filter.accept(keyPath)) {
-  FileStatus status = new S3AFileStatus(true, false, keyPath);
+  FileStatus status = new S3AFileStatus(false, keyPath,
+  owner.getUsername());
   LOG.debug("Adding directory: {}", status);
   added++;
   stats.add(status);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02f0472d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
index 75a6500..b0f08e3 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
@@ -33,28 +33,41 @@ import org.apache.hadoop.fs.Path;
 public class S3AFileStatus extends FileStatus {
   private boolean isEmptyDirectory;
 
-  // Directories
-  public S3AFileStatus(boolean isdir, boolean isemptydir, Path path) {
-super(0, isdir, 1, 0, 0, path);
+  /**
+   * Create a directory status.
+   * @param isemptydir is this an empty directory?
+   * @param path the path
+   * @param owner the owner
+   */
+  public S3AFileStatus(boolean isemptydir,
+  Path path,
+  String owner) {
+super(0, true, 1, 0, 0, path);
 isEmptyDirectory = isemptydir;
+setOwner(owner);
+setGroup(owner);
   }
 
-  // Files
+  /**
+   * A simple file.
+   * @param length file length
+   * @param modification_time mod time
+   * @param path path
+   * @param blockSize block size
+   * @param owner owner
+   */
   public S3AFileStatus(long length, long modification_time, Path path,
-  long blockSize) {
+  long blockSize, String owner) {
 super(length, false, 1, blockSize, modification_time, path);
 isEmptyDirectory = false;
+setOwner(owner);
+setGroup(owner);
   }
 
   public boolean isEmptyDirectory() {
 return isEmptyDirectory;
   }
 
-  @Override
-  public String getOwner() {
-return System.getProperty("user.name");
-  }
-
   /** Compare if this object is equal to another object.
* @param   o the object to be compared.
* @return  true if two file status has the same path name; false if not.


[2/3] hadoop git commit: HADOOP-12774. s3a should use UGI.getCurrentUser.getShortname() for username. Contributed by Steve Loughran.

2016-10-24 Thread cnauroth
HADOOP-12774. s3a should use UGI.getCurrentUser.getShortname() for username. 
Contributed by Steve Loughran.

(cherry picked from commit 3372e940303149d6258e0b72c54d72f080f0daa2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c2f67bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c2f67bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c2f67bd

Branch: refs/heads/branch-2
Commit: 5c2f67bdae208ea14c518af58e551d563b2bf8cd
Parents: 5b7cbb5
Author: Chris Nauroth 
Authored: Mon Oct 24 21:54:06 2016 -0700
Committer: Chris Nauroth 
Committed: Mon Oct 24 21:54:16 2016 -0700

--
 .../java/org/apache/hadoop/fs/s3a/Listing.java  |  5 +--
 .../org/apache/hadoop/fs/s3a/S3AFileStatus.java | 33 ++--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 29 -
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  8 +++--
 .../hadoop/fs/s3a/ITestS3AConfiguration.java| 23 +-
 5 files changed, 74 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2f67bd/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
index 4120b20..30d8e6f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
@@ -298,7 +298,7 @@ public class Listing {
 // Skip over keys that are ourselves and old S3N _$folder$ files
 if (acceptor.accept(keyPath, summary) && filter.accept(keyPath)) {
   FileStatus status = createFileStatus(keyPath, summary,
-  owner.getDefaultBlockSize(keyPath));
+  owner.getDefaultBlockSize(keyPath), owner.getUsername());
   LOG.debug("Adding: {}", status);
   stats.add(status);
   added++;
@@ -312,7 +312,8 @@ public class Listing {
   for (String prefix : objects.getCommonPrefixes()) {
 Path keyPath = owner.keyToQualifiedPath(prefix);
 if (acceptor.accept(keyPath, prefix) && filter.accept(keyPath)) {
-  FileStatus status = new S3AFileStatus(true, false, keyPath);
+  FileStatus status = new S3AFileStatus(false, keyPath,
+  owner.getUsername());
   LOG.debug("Adding directory: {}", status);
   added++;
   stats.add(status);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2f67bd/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
index 75a6500..b0f08e3 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileStatus.java
@@ -33,28 +33,41 @@ import org.apache.hadoop.fs.Path;
 public class S3AFileStatus extends FileStatus {
   private boolean isEmptyDirectory;
 
-  // Directories
-  public S3AFileStatus(boolean isdir, boolean isemptydir, Path path) {
-super(0, isdir, 1, 0, 0, path);
+  /**
+   * Create a directory status.
+   * @param isemptydir is this an empty directory?
+   * @param path the path
+   * @param owner the owner
+   */
+  public S3AFileStatus(boolean isemptydir,
+  Path path,
+  String owner) {
+super(0, true, 1, 0, 0, path);
 isEmptyDirectory = isemptydir;
+setOwner(owner);
+setGroup(owner);
   }
 
-  // Files
+  /**
+   * A simple file.
+   * @param length file length
+   * @param modification_time mod time
+   * @param path path
+   * @param blockSize block size
+   * @param owner owner
+   */
   public S3AFileStatus(long length, long modification_time, Path path,
-  long blockSize) {
+  long blockSize, String owner) {
 super(length, false, 1, blockSize, modification_time, path);
 isEmptyDirectory = false;
+setOwner(owner);
+setGroup(owner);
   }
 
   public boolean isEmptyDirectory() {
 return isEmptyDirectory;
   }
 
-  @Override
-  public String getOwner() {
-return System.getProperty("user.name");
-  }
-
   /** Compare if this object is equal to another object.
* @param   o the object to be compared.
* @return  true if two file status has the same path name; false if not.


[1/3] hadoop git commit: HADOOP-13727. S3A: Reduce high number of connections to EC2 Instance Metadata Service caused by InstanceProfileCredentialsProvider. Contributed by Chris Nauroth.

2016-10-24 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 81595a127 -> 5b7cbb5a3
  refs/heads/branch-2.8 c82910203 -> 78970e0db
  refs/heads/trunk 0a166b134 -> d8fa1cfa6


HADOOP-13727. S3A: Reduce high number of connections to EC2 Instance Metadata 
Service caused by InstanceProfileCredentialsProvider. Contributed by Chris 
Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8fa1cfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8fa1cfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8fa1cfa

Branch: refs/heads/trunk
Commit: d8fa1cfa6722cbf7a4ec3d6b9c44b034da9aa351
Parents: 0a166b1
Author: Chris Nauroth 
Authored: Mon Oct 24 21:22:34 2016 -0700
Committer: Chris Nauroth 
Committed: Mon Oct 24 21:22:34 2016 -0700

--
 .../src/main/resources/core-default.xml | 122 +
 .../fs/s3a/AWSCredentialProviderList.java   |  11 +
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 124 +++--
 ...haredInstanceProfileCredentialsProvider.java |  67 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  52 +++-
 .../fs/s3a/ITestS3AAWSCredentialsProvider.java  | 113 +---
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  42 ++-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 273 +++
 8 files changed, 616 insertions(+), 188 deletions(-)
--
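
The new SharedInstanceProfileCredentialsProvider appears in the change summary above, but its source is not shown in this message. The underlying idea is a single process-wide provider instance reused by every S3A client, so that each filesystem instance does not generate its own connection churn against the EC2 instance metadata service. A generic sketch of that sharing pattern (class and method names here are illustrative, not the Hadoop or AWS SDK classes):

// Lazy, thread-safe singleton (initialization-on-demand holder). All callers
// share one instance instead of each constructing their own provider.
public final class SharedProviderSketch {
  private SharedProviderSketch() {
    // imagine an expensive handle to the instance metadata service here
  }

  private static final class Holder {
    static final SharedProviderSketch INSTANCE = new SharedProviderSketch();
  }

  public static SharedProviderSketch getInstance() {
    return Holder.INSTANCE;
  }

  public static void main(String[] args) {
    System.out.println(getInstance() == getInstance());   // prints: true
  }
}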


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8fa1cfa/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 59d939b..dbbb3e1 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -41,10 +41,10 @@
 
   hadoop.http.filter.initializers
   org.apache.hadoop.http.lib.StaticUserWebFilter
-  A comma separated list of class names. Each class in the list 
-  must extend org.apache.hadoop.http.FilterInitializer. The corresponding 
-  Filter will be initialized. Then, the Filter will be applied to all user 
-  facing jsp and servlet web pages.  The ordering of the list defines the 
+  A comma separated list of class names. Each class in the list
+  must extend org.apache.hadoop.http.FilterInitializer. The corresponding
+  Filter will be initialized. Then, the Filter will be applied to all user
+  facing jsp and servlet web pages.  The ordering of the list defines the
   ordering of the filters.
 
 
@@ -76,14 +76,14 @@
   hadoop.security.group.mapping
   
org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback
   
-Class for user to group mapping (get groups for a given user) for ACL. 
+Class for user to group mapping (get groups for a given user) for ACL.
 The default implementation,
-org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback, 
-will determine if the Java Native Interface (JNI) is available. If JNI is 
-available the implementation will use the API within hadoop to resolve a 
-list of groups for a user. If JNI is not available then the shell 
-implementation, ShellBasedUnixGroupsMapping, is used.  This implementation 
-shells out to the Linux/Unix environment with the 
+org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback,
+will determine if the Java Native Interface (JNI) is available. If JNI is
+available the implementation will use the API within hadoop to resolve a
+list of groups for a user. If JNI is not available then the shell
+implementation, ShellBasedUnixGroupsMapping, is used.  This implementation
+shells out to the Linux/Unix environment with the
 bash -c groups command to resolve a list of groups for a user.
   
 
@@ -481,10 +481,10 @@
 
   hadoop.rpc.protection
   authentication
-  A comma-separated list of protection values for secured sasl 
+  A comma-separated list of protection values for secured sasl
   connections. Possible values are authentication, integrity and privacy.
-  authentication means authentication only and no integrity or privacy; 
-  integrity implies authentication and integrity are enabled; and privacy 
+  authentication means authentication only and no integrity or privacy;
+  integrity implies authentication and integrity are enabled; and privacy
   implies all of authentication, integrity and privacy are enabled.
   hadoop.security.saslproperties.resolver.class can be used to override
   the hadoop.rpc.protection for a connection at the server side.
@@ -494,10 +494,10 @@
 
   hadoop.security.saslproperties.resolver.class
   
-  

[3/3] hadoop git commit: HADOOP-13727. S3A: Reduce high number of connections to EC2 Instance Metadata Service caused by InstanceProfileCredentialsProvider. Contributed by Chris Nauroth.

2016-10-24 Thread cnauroth
HADOOP-13727. S3A: Reduce high number of connections to EC2 Instance Metadata 
Service caused by InstanceProfileCredentialsProvider. Contributed by Chris 
Nauroth.

(cherry picked from commit d8fa1cfa6722cbf7a4ec3d6b9c44b034da9aa351)
(cherry picked from commit 5b7cbb5a3c1877e51f63f6d6dfb201afa55dc4da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78970e0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78970e0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78970e0d

Branch: refs/heads/branch-2.8
Commit: 78970e0db3cf53a2b804feec8a9e0b906c0800d6
Parents: c829102
Author: Chris Nauroth 
Authored: Mon Oct 24 21:22:34 2016 -0700
Committer: Chris Nauroth 
Committed: Mon Oct 24 21:23:00 2016 -0700

--
 .../src/main/resources/core-default.xml | 122 +
 .../fs/s3a/AWSCredentialProviderList.java   |  11 +
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 124 +++--
 ...haredInstanceProfileCredentialsProvider.java |  67 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  52 +++-
 .../fs/s3a/ITestS3AAWSCredentialsProvider.java  | 113 +---
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  42 ++-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 273 +++
 8 files changed, 616 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78970e0d/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ba40a83..adfe8fd 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -50,10 +50,10 @@
 
   hadoop.http.filter.initializers
   org.apache.hadoop.http.lib.StaticUserWebFilter
-  A comma separated list of class names. Each class in the list 
-  must extend org.apache.hadoop.http.FilterInitializer. The corresponding 
-  Filter will be initialized. Then, the Filter will be applied to all user 
-  facing jsp and servlet web pages.  The ordering of the list defines the 
+  A comma separated list of class names. Each class in the list
+  must extend org.apache.hadoop.http.FilterInitializer. The corresponding
+  Filter will be initialized. Then, the Filter will be applied to all user
+  facing jsp and servlet web pages.  The ordering of the list defines the
   ordering of the filters.
 
 
@@ -85,14 +85,14 @@
   hadoop.security.group.mapping
   
org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback
   
-Class for user to group mapping (get groups for a given user) for ACL. 
+Class for user to group mapping (get groups for a given user) for ACL.
 The default implementation,
-org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback, 
-will determine if the Java Native Interface (JNI) is available. If JNI is 
-available the implementation will use the API within hadoop to resolve a 
-list of groups for a user. If JNI is not available then the shell 
-implementation, ShellBasedUnixGroupsMapping, is used.  This implementation 
-shells out to the Linux/Unix environment with the 
+org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback,
+will determine if the Java Native Interface (JNI) is available. If JNI is
+available the implementation will use the API within hadoop to resolve a
+list of groups for a user. If JNI is not available then the shell
+implementation, ShellBasedUnixGroupsMapping, is used.  This implementation
+shells out to the Linux/Unix environment with the
 bash -c groups command to resolve a list of groups for a user.
   
 
@@ -409,10 +409,10 @@
 
   hadoop.rpc.protection
   authentication
-  A comma-separated list of protection values for secured sasl 
+  A comma-separated list of protection values for secured sasl
   connections. Possible values are authentication, integrity and privacy.
-  authentication means authentication only and no integrity or privacy; 
-  integrity implies authentication and integrity are enabled; and privacy 
+  authentication means authentication only and no integrity or privacy;
+  integrity implies authentication and integrity are enabled; and privacy
   implies all of authentication, integrity and privacy are enabled.
   hadoop.security.saslproperties.resolver.class can be used to override
   the hadoop.rpc.protection for a connection at the server side.
@@ -422,10 +422,10 @@
 
   hadoop.security.saslproperties.resolver.class
   
-  SaslPropertiesResolver used to 

[2/3] hadoop git commit: HADOOP-13727. S3A: Reduce high number of connections to EC2 Instance Metadata Service caused by InstanceProfileCredentialsProvider. Contributed by Chris Nauroth.

2016-10-24 Thread cnauroth
HADOOP-13727. S3A: Reduce high number of connections to EC2 Instance Metadata 
Service caused by InstanceProfileCredentialsProvider. Contributed by Chris 
Nauroth.

(cherry picked from commit d8fa1cfa6722cbf7a4ec3d6b9c44b034da9aa351)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b7cbb5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b7cbb5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b7cbb5a

Branch: refs/heads/branch-2
Commit: 5b7cbb5a3c1877e51f63f6d6dfb201afa55dc4da
Parents: 81595a1
Author: Chris Nauroth 
Authored: Mon Oct 24 21:22:34 2016 -0700
Committer: Chris Nauroth 
Committed: Mon Oct 24 21:22:46 2016 -0700

--
 .../src/main/resources/core-default.xml | 122 +
 .../fs/s3a/AWSCredentialProviderList.java   |  11 +
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 124 +++--
 ...haredInstanceProfileCredentialsProvider.java |  67 +
 .../src/site/markdown/tools/hadoop-aws/index.md |  52 +++-
 .../fs/s3a/ITestS3AAWSCredentialsProvider.java  | 113 +---
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  42 ++-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   | 273 +++
 8 files changed, 616 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b7cbb5a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b4d019b..1beea94 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -50,10 +50,10 @@
 
   hadoop.http.filter.initializers
   org.apache.hadoop.http.lib.StaticUserWebFilter
-  A comma separated list of class names. Each class in the list 
-  must extend org.apache.hadoop.http.FilterInitializer. The corresponding 
-  Filter will be initialized. Then, the Filter will be applied to all user 
-  facing jsp and servlet web pages.  The ordering of the list defines the 
+  A comma separated list of class names. Each class in the list
+  must extend org.apache.hadoop.http.FilterInitializer. The corresponding
+  Filter will be initialized. Then, the Filter will be applied to all user
+  facing jsp and servlet web pages.  The ordering of the list defines the
   ordering of the filters.
 
 
@@ -85,14 +85,14 @@
   hadoop.security.group.mapping
   
org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback
   
-Class for user to group mapping (get groups for a given user) for ACL. 
+Class for user to group mapping (get groups for a given user) for ACL.
 The default implementation,
-org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback, 
-will determine if the Java Native Interface (JNI) is available. If JNI is 
-available the implementation will use the API within hadoop to resolve a 
-list of groups for a user. If JNI is not available then the shell 
-implementation, ShellBasedUnixGroupsMapping, is used.  This implementation 
-shells out to the Linux/Unix environment with the 
+org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback,
+will determine if the Java Native Interface (JNI) is available. If JNI is
+available the implementation will use the API within hadoop to resolve a
+list of groups for a user. If JNI is not available then the shell
+implementation, ShellBasedUnixGroupsMapping, is used.  This implementation
+shells out to the Linux/Unix environment with the
 bash -c groups command to resolve a list of groups for a user.
   
 
@@ -490,10 +490,10 @@
 
   hadoop.rpc.protection
   authentication
-  A comma-separated list of protection values for secured sasl 
+  A comma-separated list of protection values for secured sasl
   connections. Possible values are authentication, integrity and privacy.
-  authentication means authentication only and no integrity or privacy; 
-  integrity implies authentication and integrity are enabled; and privacy 
+  authentication means authentication only and no integrity or privacy;
+  integrity implies authentication and integrity are enabled; and privacy
   implies all of authentication, integrity and privacy are enabled.
   hadoop.security.saslproperties.resolver.class can be used to override
   the hadoop.rpc.protection for a connection at the server side.
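
The hadoop.rpc.protection value described above can also be set programmatically on a client. A minimal sketch, assuming a caller that wants full SASL privacy; the key name and allowed values come from the property description, everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class RpcProtectionExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Ask for authentication, integrity and privacy on secured
        // SASL connections; "authentication" alone is the default.
        conf.set("hadoop.rpc.protection", "privacy");
        System.out.println(conf.get("hadoop.rpc.protection"));
      }
    }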
@@ -503,10 +503,10 @@
 
   hadoop.security.saslproperties.resolver.class
   
-  SaslPropertiesResolver used to resolve the QOP used for a 
-  connection. If not specified, the 

hadoop git commit: HADOOP-13452. S3Guard: Implement access policy for intra-client consistency with in-memory metadata store. Contributed by Aaron Fabbri.

2016-10-19 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 dc3e57975 -> d7af9c515


HADOOP-13452. S3Guard: Implement access policy for intra-client consistency 
with in-memory metadata store. Contributed by Aaron Fabbri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7af9c51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7af9c51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7af9c51

Branch: refs/heads/HADOOP-13345
Commit: d7af9c515b65daf9b25ab4418d1c7990b1ddd502
Parents: dc3e579
Author: Chris Nauroth 
Authored: Wed Oct 19 16:32:46 2016 -0700
Committer: Chris Nauroth 
Committed: Wed Oct 19 16:32:46 2016 -0700

--
 .../fs/s3a/s3guard/DirListingMetadata.java  |   9 +
 .../fs/s3a/s3guard/LocalMetadataStore.java  | 216 +++
 .../hadoop/fs/s3a/s3guard/LruHashMap.java   |  50 +
 .../hadoop/fs/s3a/s3guard/MetadataStore.java|  14 +-
 .../hadoop/fs/s3a/s3guard/PathMetadata.java |   7 +-
 .../fs/s3a/s3guard/AbstractMSContract.java  |   5 +-
 .../fs/s3a/s3guard/MetadataStoreTestBase.java   |   8 +-
 .../fs/s3a/s3guard/TestLocalMetadataStore.java  |  65 ++
 8 files changed, 359 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7af9c51/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
index acb3e51..1838f42 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DirListingMetadata.java
@@ -38,6 +38,13 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class DirListingMetadata {
 
+  /**
+   * Convenience parameter for passing into
+   * {@link DirListingMetadata#DirListingMetadata(Path, Collection, boolean)}.
+   */
+  public static final Collection EMPTY_DIR =
+  Collections.emptyList();
+
   private final Path path;
 
   /** Using a map for fast find / remove with large directories. */
@@ -151,6 +158,8 @@ public class DirListingMetadata {
*/
   private void checkChildPath(Path childPath) {
 Preconditions.checkNotNull(childPath, "childPath must be non-null");
+Preconditions.checkArgument(childPath.isAbsolute(), "childPath must be " +
+"absolute");
 Preconditions.checkArgument(!childPath.isRoot(),
 "childPath cannot be the root path");
 Preconditions.checkArgument(childPath.getParent().equals(path),
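
The new EMPTY_DIR constant is a convenience argument for the (Path, Collection, boolean) constructor referenced in the javadoc above, and checkChildPath now insists on absolute child paths. A minimal usage sketch, assuming the constructor behaves as that javadoc suggests; the bucket name and the meaning of the boolean flag are illustrative assumptions:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.s3a.s3guard.DirListingMetadata;

    public class EmptyDirListingExample {
      public static void main(String[] args) {
        // Child paths must now be absolute, so the directory itself is too.
        Path dir = new Path("s3a://example-bucket/data");
        // Record a directory known to be empty; the boolean is assumed
        // here to mark the listing as fully known.
        DirListingMetadata listing =
            new DirListingMetadata(dir, DirListingMetadata.EMPTY_DIR, true);
        System.out.println(listing);
      }
    }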

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7af9c51/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
new file mode 100644
index 000..d47d85e
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+
+/**
+ * This is a local, in-memory, implementation of MetadataStore.
+ * This is 

[21/52] [abbrv] hadoop git commit: MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more useful default (miklos.szeg...@cloudera.com via rkanter)

2016-10-12 Thread cnauroth
MAPREDUCE-6776. yarn.app.mapreduce.client.job.max-retries should have a more 
useful default (miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3f37e6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3f37e6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3f37e6f

Branch: refs/heads/HADOOP-13037
Commit: f3f37e6fb8172f6434e06eb9a137c0c155b3952e
Parents: 2e853be
Author: Robert Kanter 
Authored: Fri Oct 7 14:47:06 2016 -0700
Committer: Robert Kanter 
Committed: Fri Oct 7 14:47:06 2016 -0700

--
 .../apache/hadoop/mapreduce/MRJobConfig.java|  2 +-
 .../src/main/resources/mapred-default.xml   | 10 +++---
 .../apache/hadoop/mapred/JobClientUnitTest.java | 34 
 3 files changed, 34 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 5716404..1325b74 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -505,7 +505,7 @@ public interface MRJobConfig {
*/
   public static final String MR_CLIENT_JOB_MAX_RETRIES =
   MR_PREFIX + "client.job.max-retries";
-  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 0;
+  public static final int DEFAULT_MR_CLIENT_JOB_MAX_RETRIES = 3;
 
   /**
* How long to wait between jobclient retries on failure
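
Because DEFAULT_MR_CLIENT_JOB_MAX_RETRIES now defaults to 3, a client that prefers the old fail-fast behaviour has to opt out explicitly. A minimal sketch, using only the configuration key shown in this hunk:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class JobRetryConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Restore the previous behaviour of no getJob retries,
        // overriding the new default of 3.
        conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 0);
        System.out.println(
            conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 3));
      }
    }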

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 73aaa7a..fe29212 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1505,12 +1505,12 @@
 
 
   yarn.app.mapreduce.client.job.max-retries
-  0
+  3
   The number of retries the client will make for getJob and
-  dependent calls.  The default is 0 as this is generally only needed for
-  non-HDFS DFS where additional, high level retries are required to avoid
-  spurious failures during the getJob call.  30 is a good value for
-  WASB
+dependent calls.
+This is needed for non-HDFS DFS where additional, high level
+retries are required to avoid spurious failures during the getJob call.
+30 is a good value for WASB
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3f37e6f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
index 4895a5b..e02232d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java
@@ -225,10 +225,10 @@ public class JobClientUnitTest {
 
 //To prevent the test from running for a very long time, lower the retry
 JobConf conf = new JobConf();
-conf.set(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, "3");
+conf.setInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES, 2);
 
 TestJobClientGetJob client = new TestJobClientGetJob(conf);
-JobID id = new JobID("ajob",1);
+JobID id = new JobID("ajob", 1);
  

[27/52] [abbrv] hadoop git commit: HADOOP-12579. Deprecate WriteableRPCEngine. Contributed by Wei Zhou

2016-10-12 Thread cnauroth
HADOOP-12579. Deprecate WriteableRPCEngine. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec0b7071
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec0b7071
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec0b7071

Branch: refs/heads/HADOOP-13037
Commit: ec0b70716c8e6509654a3975d3ca139a0144cc8e
Parents: 4d10621
Author: Kai Zheng 
Authored: Sun Oct 9 15:07:03 2016 +0600
Committer: Kai Zheng 
Committed: Sun Oct 9 15:07:03 2016 +0600

--
 .../src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec0b7071/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index a9dbb41..3d6d461 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -46,6 +46,7 @@ import org.apache.htrace.core.Tracer;
 
 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving
+@Deprecated
 public class WritableRpcEngine implements RpcEngine {
   private static final Log LOG = LogFactory.getLog(RPC.class);
   
@@ -331,6 +332,7 @@ public class WritableRpcEngine implements RpcEngine {
 
 
   /** An RPC Server. */
+  @Deprecated
   public static class Server extends RPC.Server {
 /** 
  * Construct an RPC server.
@@ -443,7 +445,8 @@ public class WritableRpcEngine implements RpcEngine {
 value = value.substring(0, 55)+"...";
   LOG.info(value);
 }
-
+
+@Deprecated
 static class WritableRpcInvoker implements RpcInvoker {
 
  @Override
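
With WritableRpcEngine deprecated, new protocols are expected to be bound to the protobuf-based engine instead. A hedged sketch of selecting the engine per protocol; RPC.setProtocolEngine and ProtobufRpcEngine are existing Hadoop IPC classes, while the protocol interface here is purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;

    public class RpcEngineSelectionExample {
      // A hypothetical protocol interface, only for illustration.
      interface ExampleProtocol {}

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Bind the protocol to ProtobufRpcEngine rather than the
        // deprecated WritableRpcEngine.
        RPC.setProtocolEngine(conf, ExampleProtocol.class,
            ProtobufRpcEngine.class);
      }
    }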


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/52] [abbrv] hadoop git commit: HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.

2016-10-12 Thread cnauroth
HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3565c9af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3565c9af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3565c9af

Branch: refs/heads/HADOOP-13037
Commit: 3565c9af17ab05bf9e7f68b71b6c6850df772bb9
Parents: 69620f95
Author: Kihwal Lee 
Authored: Fri Oct 7 14:14:47 2016 -0500
Committer: Kihwal Lee 
Committed: Fri Oct 7 14:15:59 2016 -0500

--
 .../hdfs/server/namenode/FSDirDeleteOp.java | 63 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java   | 11 ++--
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 3 files changed, 38 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3565c9af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 21ee3ce..328ce79 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -55,7 +55,7 @@ class FSDirDeleteOp {
 FSNamesystem fsn = fsd.getFSNamesystem();
 fsd.writeLock();
 try {
-  if (deleteAllowed(iip, iip.getPath()) ) {
+  if (deleteAllowed(iip)) {
 List snapshottableDirs = new ArrayList<>();
 FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
 ReclaimContext context = new ReclaimContext(
@@ -98,20 +98,24 @@ class FSDirDeleteOp {
 FSDirectory fsd = fsn.getFSDirectory();
 FSPermissionChecker pc = fsd.getPermissionChecker();
 
-final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
-src = iip.getPath();
-if (!recursive && fsd.isNonEmptyDirectory(iip)) {
-  throw new PathIsNotEmptyDirectoryException(src + " is non empty");
+if (FSDirectory.isExactReservedName(src)) {
+  throw new InvalidPathException(src);
 }
+
+final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
 if (fsd.isPermissionEnabled()) {
   fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
   FsAction.ALL, true);
 }
-if (recursive && fsd.isNonEmptyDirectory(iip)) {
-  checkProtectedDescendants(fsd, src);
+if (fsd.isNonEmptyDirectory(iip)) {
+  if (!recursive) {
+throw new PathIsNotEmptyDirectoryException(
+iip.getPath() + " is non empty");
+  }
+  checkProtectedDescendants(fsd, iip);
 }
 
-return deleteInternal(fsn, src, iip, logRetryCache);
+return deleteInternal(fsn, iip, logRetryCache);
   }
 
   /**
@@ -126,17 +130,14 @@ class FSDirDeleteOp {
* @param src a string representation of a path to an inode
* @param mtime the time the inode is removed
*/
-  static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
+  static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
   throws IOException {
 assert fsd.hasWriteLock();
 FSNamesystem fsn = fsd.getFSNamesystem();
 BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
 List removedINodes = new ChunkedArrayList<>();
 List removedUCFiles = new ChunkedArrayList<>();
-
-final INodesInPath iip = fsd.getINodesInPath4Write(
-FSDirectory.normalizePath(src), false);
-if (!deleteAllowed(iip, src)) {
+if (!deleteAllowed(iip)) {
   return;
 }
 List snapshottableDirs = new ArrayList<>();
@@ -162,7 +163,6 @@ class FSDirDeleteOp {
* 
* For small directory or file the deletion is done in one shot.
* @param fsn namespace
-   * @param src path name to be deleted
* @param iip the INodesInPath instance containing all the INodes for the 
path
* @param logRetryCache whether to record RPC ids in editlog for retry cache
*  rebuilding
@@ -170,15 +170,11 @@ class FSDirDeleteOp {
* @throws IOException
*/
   static BlocksMapUpdateInfo deleteInternal(
-  FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
+  FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
   throws IOException {
 assert fsn.hasWriteLock();
 if (NameNode.stateChangeLog.isDebugEnabled()) {
-  NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-}
-
-if (FSDirectory.isExactReservedName(src)) {
-  throw new 

[30/52] [abbrv] hadoop git commit: HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. Contributed by Sammi Chen

2016-10-12 Thread cnauroth
HDFS-10895. Update HDFS Erasure Coding doc to add how to use ISA-L based coder. 
Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af50da32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af50da32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af50da32

Branch: refs/heads/HADOOP-13037
Commit: af50da3298f92a52cc20d5f6aab6f6ad8134efbd
Parents: 3d59b18
Author: Kai Zheng 
Authored: Mon Oct 10 11:55:49 2016 +0600
Committer: Kai Zheng 
Committed: Mon Oct 10 11:55:49 2016 +0600

--
 .../src/site/markdown/HDFSErasureCoding.md   | 15 ++-
 1 file changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af50da32/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 18b3a25..627260f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -22,6 +22,7 @@ HDFS Erasure Coding
 * [Deployment](#Deployment)
 * [Cluster and hardware 
configuration](#Cluster_and_hardware_configuration)
 * [Configuration keys](#Configuration_keys)
+* [Enable Intel ISA-L](#Enable_Intel_ISA-L)
 * [Administrative commands](#Administrative_commands)
 
 Purpose
@@ -73,6 +74,9 @@ Architecture
 
 There are three policies currently being supported: RS-DEFAULT-3-2-64k, 
RS-DEFAULT-6-3-64k and RS-LEGACY-6-3-64k. All with default cell size of 64KB. 
The system default policy is RS-DEFAULT-6-3-64k which use the default schema 
RS_6_3_SCHEMA with a cell size of 64KB.
 
+ *  **Intel ISA-L**
+Intel ISA-L stands for Intel Intelligent Storage Acceleration Library. 
ISA-L is a collection of optimized low-level functions used primarily in 
storage applications. It includes a fast block Reed-Solomon type erasure codes 
optimized for Intel AVX and AVX2 instruction sets.
+HDFS EC can leverage this open-source library to accelerate encoding and 
decoding calculation. ISA-L supports most of major operating systems, including 
Linux and Windows. By default, ISA-L is not enabled in HDFS.
 
 Deployment
 --
@@ -98,7 +102,7 @@ Deployment
   `io.erasurecode.codec.rs-default.rawcoder` for the default RS codec,
   `io.erasurecode.codec.rs-legacy.rawcoder` for the legacy RS codec,
   `io.erasurecode.codec.xor.rawcoder` for the XOR codec.
-  The default implementations for all of these codecs are pure Java.
+  The default implementations for all of these codecs are pure Java. For 
default RS codec, there is also a native implementation which leverages Intel 
ISA-L library to improve the encoding and decoding calculation. Please refer to 
section "Enable Intel ISA-L" for more detail information.
 
   Erasure coding background recovery work on the DataNodes can also be tuned 
via the following configuration parameters:
 
@@ -106,6 +110,15 @@ Deployment
   1. `dfs.datanode.stripedread.threads` - Number of concurrent reader threads. 
Default value is 20 threads.
   1. `dfs.datanode.stripedread.buffer.size` - Buffer size for reader service. 
Default value is 256KB.
 
+### Enable Intel ISA-L
+
+  HDFS native implementation of default RS codec leverages Intel ISA-L library 
to improve the encoding and decoding calculation. To enable and use Intel 
ISA-L, there are three steps.
+  1. Build ISA-L library. Please refer to the official site 
"https://github.com/01org/isa-l/" for detail information.
+  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build 
options" section in "Build instructions for Hadoop"(BUILDING.txt) document. Use 
-Dbundle.isal to copy the contents of the isal.lib directory into the final tar 
file. Deploy hadoop with the tar file. Make sure ISA-L library is available on 
both HDFS client and DataNodes.
+  3. Configure the `io.erasurecode.codec.rs-default.rawcoder` key with value 
`org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on 
HDFS client and DataNodes.
+
+  To check ISA-L library enable state, try "Hadoop checknative" command. It 
will tell you if ISA-L library is enabled or not.
+
 ### Administrative commands
 
   HDFS provides an `erasurecode` subcommand to perform administrative commands 
related to erasure coding.
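
Step 3 of the ISA-L instructions above is a single configuration change; the same key can also be set programmatically. A minimal sketch, using only the key and factory class named in the documentation hunk:

    import org.apache.hadoop.conf.Configuration;

    public class IsalRawCoderConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Point the default RS codec at the ISA-L backed native coder
        // on HDFS clients and DataNodes.
        conf.set("io.erasurecode.codec.rs-default.rawcoder",
            "org.apache.hadoop.io.erasurecode.rawcoder."
                + "NativeRSRawErasureCoderFactory");
        System.out.println(
            conf.get("io.erasurecode.codec.rs-default.rawcoder"));
      }
    }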


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/52] [abbrv] hadoop git commit: HADOOP-13697. LogLevel#main should not throw exception if no arguments. Contributed by Mingliang Liu

2016-10-12 Thread cnauroth
HADOOP-13697. LogLevel#main should not throw exception if no arguments. 
Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fb392a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fb392a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fb392a5

Branch: refs/heads/HADOOP-13037
Commit: 2fb392a587d288b628936ca6d18fabad04afc585
Parents: 809cfd2
Author: Mingliang Liu 
Authored: Fri Oct 7 14:05:40 2016 -0700
Committer: Mingliang Liu 
Committed: Tue Oct 11 10:57:08 2016 -0700

--
 .../src/main/java/org/apache/hadoop/log/LogLevel.java   | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fb392a5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 4fa839f..79eae12 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -47,15 +47,17 @@ import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * Change log level in runtime.
  */
 @InterfaceStability.Evolving
 public class LogLevel {
-  public static final String USAGES = "\nUsage: General options are:\n"
+  public static final String USAGES = "\nUsage: Command options are:\n"
   + "\t[-getlevel   [-protocol (http|https)]\n"
   + "\t[-setlevel"
   + "[-protocol (http|https)]\n";
@@ -67,7 +69,7 @@ public class LogLevel {
*/
   public static void main(String[] args) throws Exception {
 CLI cli = new CLI(new Configuration());
-System.exit(cli.run(args));
+System.exit(ToolRunner.run(cli, args));
   }
 
   /**
@@ -81,6 +83,7 @@ public class LogLevel {
 
   private static void printUsage() {
 System.err.println(USAGES);
+GenericOptionsParser.printGenericCommandUsage(System.err);
   }
 
   public static boolean isValidProtocol(String protocol) {
@@ -107,7 +110,7 @@ public class LogLevel {
 sendLogLevelRequest();
   } catch (HadoopIllegalArgumentException e) {
 printUsage();
-throw e;
+return -1;
   }
   return 0;
 }
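
The change above makes LogLevel follow the usual Tool convention: print usage and return a non-zero code instead of rethrowing. A generic sketch of that pattern; every name in it is illustrative and not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class UsageOnBadArgsTool extends Configured implements Tool {
      @Override
      public int run(String[] args) {
        if (args.length == 0) {
          System.err.println("Usage: UsageOnBadArgsTool <value>");
          return -1;   // signal failure instead of throwing
        }
        System.out.println("value: " + args[0]);
        return 0;
      }

      public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options (-conf, -D, ...) before run().
        System.exit(ToolRunner.run(new Configuration(),
            new UsageOnBadArgsTool(), args));
      }
    }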


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/52] [abbrv] hadoop git commit: HADOOP-13678 Update jackson from 1.9.13 to 2.x in hadoop-tools. Contributed by Akira Ajisaka.

2016-10-12 Thread cnauroth
HADOOP-13678 Update jackson from 1.9.13 to 2.x in hadoop-tools. Contributed by 
Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cc841f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cc841f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cc841f1

Branch: refs/heads/HADOOP-13037
Commit: 2cc841f16ec9aa5336495fc20ee781a1276fddc5
Parents: 4d2f380
Author: Steve Loughran 
Authored: Thu Oct 6 16:30:26 2016 +0100
Committer: Steve Loughran 
Committed: Thu Oct 6 16:31:00 2016 +0100

--
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  4 +++
 ...ClientCredentialBasedAccesTokenProvider.java |  5 +--
 hadoop-tools/hadoop-azure/pom.xml   |  6 +++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 16 -
 hadoop-tools/hadoop-openstack/pom.xml   | 18 +-
 .../swift/auth/ApiKeyAuthenticationRequest.java |  2 +-
 .../fs/swift/auth/entities/AccessToken.java |  2 +-
 .../hadoop/fs/swift/auth/entities/Catalog.java  |  2 +-
 .../hadoop/fs/swift/auth/entities/Endpoint.java |  2 +-
 .../hadoop/fs/swift/auth/entities/Tenant.java   |  2 +-
 .../hadoop/fs/swift/auth/entities/User.java |  2 +-
 .../snative/SwiftNativeFileSystemStore.java |  3 +-
 .../apache/hadoop/fs/swift/util/JSONUtil.java   | 24 +
 hadoop-tools/hadoop-rumen/pom.xml   |  9 +
 .../apache/hadoop/tools/rumen/Anonymizer.java   | 23 ++---
 .../hadoop/tools/rumen/HadoopLogsAnalyzer.java  |  3 +-
 .../tools/rumen/JsonObjectMapperParser.java | 17 -
 .../tools/rumen/JsonObjectMapperWriter.java | 21 +---
 .../apache/hadoop/tools/rumen/LoggedJob.java|  2 +-
 .../hadoop/tools/rumen/LoggedLocation.java  |  2 +-
 .../tools/rumen/LoggedNetworkTopology.java  |  2 +-
 .../rumen/LoggedSingleRelativeRanking.java  |  4 +--
 .../apache/hadoop/tools/rumen/LoggedTask.java   |  2 +-
 .../hadoop/tools/rumen/LoggedTaskAttempt.java   |  2 +-
 .../hadoop/tools/rumen/datatypes/NodeName.java  |  2 +-
 .../rumen/serializers/BlockingSerializer.java   | 10 +++---
 .../DefaultAnonymizingRumenSerializer.java  |  8 ++---
 .../serializers/DefaultRumenSerializer.java |  9 ++---
 .../serializers/ObjectStringSerializer.java | 10 +++---
 .../apache/hadoop/tools/rumen/state/State.java  |  2 +-
 .../tools/rumen/state/StateDeserializer.java| 14 
 .../hadoop/tools/rumen/state/StatePool.java | 36 
 .../hadoop/tools/rumen/TestHistograms.java  | 13 +++
 hadoop-tools/hadoop-sls/pom.xml |  4 +++
 .../hadoop/yarn/sls/RumenToSLSConverter.java|  8 ++---
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  7 ++--
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  | 10 +++---
 37 files changed, 151 insertions(+), 157 deletions(-)
--
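
Across the files listed above, the recurring edit is replacing org.codehaus.jackson imports with their com.fasterxml.jackson equivalents. A self-contained sketch of the 2.x API in the same spirit; the JSON payload here is illustrative:

    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.io.IOException;
    import java.util.Map;

    public class Jackson2MigrationExample {
      public static void main(String[] args) throws IOException {
        // Jackson 1.x: org.codehaus.jackson.map.ObjectMapper
        // Jackson 2.x: com.fasterxml.jackson.databind.ObjectMapper
        ObjectMapper mapper = new ObjectMapper();
        Map<?, ?> response = mapper.readValue(
            "{\"access_token\":\"abc\",\"expires_in\":3600}", Map.class);
        System.out.println(response.get("access_token"));
      }
    }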


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index c07a1d7..e1a0bfe 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -181,5 +181,9 @@
   2.4.0
   test
 
+
+  com.fasterxml.jackson.core
+  jackson-databind
+
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cc841f1/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
index 6dfc593..11d07e7 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/hdfs/web/oauth2/AzureADClientCredentialBasedAccesTokenProvider.java
@@ -18,6 +18,9 @@
  */
 package org.apache.hadoop.hdfs.web.oauth2;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import com.fasterxml.jackson.databind.ObjectReader;
 import com.squareup.okhttp.OkHttpClient;
 import com.squareup.okhttp.Request;
 import com.squareup.okhttp.RequestBody;
@@ -29,8 +32,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.util.Timer;
 import org.apache.http.HttpStatus;
-import org.codehaus.jackson.map.ObjectMapper;
-import 

[14/52] [abbrv] hadoop git commit: HADOOP-12611. TestZKSignerSecretProvider#testMultipleInit occasionally fail (ebadger via rkanter)

2016-10-12 Thread cnauroth
HADOOP-12611. TestZKSignerSecretProvider#testMultipleInit occasionally fail 
(ebadger via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c183b9de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c183b9de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c183b9de

Branch: refs/heads/HADOOP-13037
Commit: c183b9de8d072a35dcde96a20b1550981f886e86
Parents: 459a483
Author: Robert Kanter 
Authored: Fri Oct 7 09:33:24 2016 -0700
Committer: Robert Kanter 
Committed: Fri Oct 7 09:33:31 2016 -0700

--
 .../util/RolloverSignerSecretProvider.java  |   2 +-
 .../util/TestZKSignerSecretProvider.java| 221 +--
 2 files changed, 100 insertions(+), 123 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c183b9de/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
index fda5572..66b2fde 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
 public abstract class RolloverSignerSecretProvider
 extends SignerSecretProvider {
 
-  private static Logger LOG = LoggerFactory.getLogger(
+  static Logger LOG = LoggerFactory.getLogger(
 RolloverSignerSecretProvider.class);
   /**
* Stores the currently valid secrets.  The current secret is the 0th element

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c183b9de/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index 8211314..5e640bb 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -17,7 +17,12 @@ import java.util.Arrays;
 import java.util.Properties;
 import java.util.Random;
 import javax.servlet.ServletContext;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.curator.test.TestingServer;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -25,7 +30,6 @@ import org.junit.Test;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -34,9 +38,14 @@ public class TestZKSignerSecretProvider {
   private TestingServer zkServer;
 
   // rollover every 2 sec
-  private final int timeout = 4000;
+  private final int timeout = 100;
   private final long rolloverFrequency = timeout / 2;
 
+  static final Log LOG = LogFactory.getLog(TestZKSignerSecretProvider.class);
+  {
+LogManager.getLogger( RolloverSignerSecretProvider.LOG.getName() 
).setLevel(Level.DEBUG);
+  }
+
   @Before
   public void setup() throws Exception {
 zkServer = new TestingServer();
@@ -60,8 +69,8 @@ public class TestZKSignerSecretProvider {
 byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
 byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
 byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
-ZKSignerSecretProvider secretProvider =
-spy(new ZKSignerSecretProvider(seed));
+MockZKSignerSecretProvider secretProvider =
+spy(new MockZKSignerSecretProvider(seed));
 Properties config = new Properties();
 config.setProperty(
 ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
@@ -77,7 +86,8 @@ public class TestZKSignerSecretProvider {
   Assert.assertEquals(2, allSecrets.length);
   Assert.assertArrayEquals(secret1, allSecrets[0]);
   

[12/52] [abbrv] hadoop git commit: HADOOP-12977 s3a to handle delete("/", true) robustly. Contributed by Steve Loughran.

2016-10-12 Thread cnauroth
HADOOP-12977 s3a to handle delete("/", true) robustly. Contributed by Steve 
Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebd4f39a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebd4f39a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebd4f39a

Branch: refs/heads/HADOOP-13037
Commit: ebd4f39a393e5fa9a810c6a36b749549229a53df
Parents: bf37217
Author: Steve Loughran 
Authored: Fri Oct 7 12:51:40 2016 +0100
Committer: Steve Loughran 
Committed: Fri Oct 7 12:51:40 2016 +0100

--
 .../src/site/markdown/filesystem/filesystem.md  | 77 +++-
 .../apache/hadoop/fs/FileContextURIBase.java|  4 +-
 .../AbstractContractRootDirectoryTest.java  | 34 -
 .../hadoop/fs/contract/ContractTestUtils.java   | 39 ++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 77 
 5 files changed, 197 insertions(+), 34 deletions(-)
--
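
The filesystem.md change below spells out that callers should not wrap delete() in a return-value check. The recommended calling pattern, sketched with an illustrative path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RobustDeleteExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example-data");   // illustrative path
        // Per the updated contract: just delete and move on. The boolean
        // only says whether the filesystem state changed, not whether the
        // path is gone (it is, except for the root directory).
        fs.delete(path, true);
      }
    }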


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd4f39a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 1587842..2c9dd5d 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -669,19 +669,40 @@ exists in the metadata, but no copies of any its blocks 
can be located;
 
 ### `boolean delete(Path p, boolean recursive)`
 
+Delete a path, be it a file, symbolic link or directory. The
+`recursive` flag indicates whether a recursive delete should take place —if
+unset then a non-empty directory cannot be deleted.
+
+Except in the special case of the root directory, if this API call
+completed successfully then there is nothing at the end of the path.
+That is: the outcome is desired. The return flag simply tells the caller
+whether or not any change was made to the state of the filesystem.
+
+*Note*: many uses of this method surround it with checks for the return value 
being
+false, raising exception if so. For example
+
+```java
+if (!fs.delete(path, true)) throw new IOException("Could not delete " + path);
+```
+
+This pattern is not needed. Code SHOULD just call `delete(path, recursive)` and
+assume the destination is no longer present —except in the special case of 
root
+directories, which will always remain (see below for special coverage of root 
directories).
+
  Preconditions
 
-A directory with children and recursive == false cannot be deleted
+A directory with children and `recursive == False` cannot be deleted
 
 if isDir(FS, p) and not recursive and (children(FS, p) != {}) : raise 
IOException
 
+(HDFS raises `PathIsNotEmptyDirectoryException` here.)
 
  Postconditions
 
 
 # Nonexistent path
 
-If the file does not exist the FS state does not change
+If the file does not exist the filesystem state does not change
 
 if not exists(FS, p):
 FS' = FS
@@ -700,7 +721,7 @@ A path referring to a file is removed, return value: `True`
 result = True
 
 
-# Empty root directory
+# Empty root directory, `recursive == False`
 
 Deleting an empty root does not change the filesystem state
 and may return true or false.
@@ -711,7 +732,10 @@ and may return true or false.
 
 There is no consistent return code from an attempt to delete the root 
directory.
 
-# Empty (non-root) directory
+Implementations SHOULD return true; this avoids code which checks for a false
+return value from overreacting.
+
+# Empty (non-root) directory `recursive == False`
 
 Deleting an empty directory that is not root will remove the path from the FS 
and
 return true.
@@ -721,26 +745,41 @@ return true.
 result = True
 
 
-# Recursive delete of root directory
+# Recursive delete of non-empty root directory
 
 Deleting a root path with children and `recursive==True`
  can do one of two things.
 
-The POSIX model assumes that if the user has
+1. The POSIX model assumes that if the user has
 the correct permissions to delete everything,
 they are free to do so (resulting in an empty filesystem).
 
-if isDir(FS, p) and isRoot(p) and recursive :
-FS' = ({["/"]}, {}, {}, {})
-result = True
+if isDir(FS, p) and isRoot(p) and recursive :
+FS' = ({["/"]}, {}, {}, {})
+result = True
 
-In contrast, HDFS never permits the deletion of the root of a filesystem; the
-filesystem can be taken offline and reformatted if an empty
+1. HDFS never permits the deletion of the root of a filesystem; 

[39/52] [abbrv] hadoop git commit: HDFS-10637. Modifications to remove the assumption that FsVolumes are backed by java.io.File. (Virajith Jalaparti via lei)

2016-10-12 Thread cnauroth
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b12662/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 57fab66..76af724 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -23,11 +23,13 @@ import java.io.FileOutputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
+import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
 import java.util.Collections;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -56,13 +58,18 @@ import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.BlockDirFilter;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.CloseableReferenceCount;
@@ -102,8 +109,14 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private final StorageType storageType;
   private final Map bpSlices
   = new ConcurrentHashMap();
+
+  // Refers to the base StorageLocation used to construct this volume
+  // (i.e., does not include STORAGE_DIR_CURRENT in
+  // /STORAGE_DIR_CURRENT/)
+  private final StorageLocation storageLocation;
+
   private final File currentDir;// /current
-  private final DF usage;   
+  private final DF usage;
   private final long reserved;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
@@ -124,19 +137,25 @@ public class FsVolumeImpl implements FsVolumeSpi {
*/
   protected ThreadPoolExecutor cacheExecutor;
   
-  FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
-  Configuration conf, StorageType storageType) throws IOException {
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+  Configuration conf) throws IOException {
+
+if (sd.getStorageLocation() == null) {
+  throw new IOException("StorageLocation specified for storage directory " 
+
+  sd + " is null");
+}
 this.dataset = dataset;
 this.storageID = storageID;
+this.reservedForReplicas = new AtomicLong(0L);
+this.storageLocation = sd.getStorageLocation();
+this.currentDir = sd.getCurrentDir();
+File parent = currentDir.getParentFile();
+this.usage = new DF(parent, conf);
+this.storageType = storageLocation.getStorageType();
 this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
 + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
 DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
 DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
-this.reservedForReplicas = new AtomicLong(0L);
-this.currentDir = currentDir;
-File parent = currentDir.getParentFile();
-this.usage = new DF(parent, conf);
-this.storageType = storageType;
 this.configuredCapacity = -1;
 this.conf = conf;
 cacheExecutor = initializeCacheExecutor(parent);
@@ -285,19 +304,20 @@ public class FsVolumeImpl implements FsVolumeSpi {
 return true;
   }
 
+  @VisibleForTesting
   File getCurrentDir() {
 return currentDir;
   }
   
-  File getRbwDir(String bpid) throws IOException {
+  protected File 

[41/52] [abbrv] hadoop git commit: YARN-5551. Ignore file backed pages from memory computation when smaps is enabled. Contributed by Rajesh Balamohan

2016-10-12 Thread cnauroth
YARN-5551. Ignore file backed pages from memory computation when smaps is 
enabled. Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecb51b85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecb51b85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecb51b85

Branch: refs/heads/HADOOP-13037
Commit: ecb51b857ac7faceff981b2b6f22ea1af0d42ab1
Parents: 96b1266
Author: Jason Lowe 
Authored: Tue Oct 11 15:12:43 2016 +
Committer: Jason Lowe 
Committed: Tue Oct 11 15:12:43 2016 +

--
 .../yarn/util/ProcfsBasedProcessTree.java   | 26 ++-
 .../yarn/util/TestProcfsBasedProcessTree.java   | 46 ++--
 2 files changed, 39 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 80d49c3..29bc277 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -406,15 +406,14 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 continue;
   }
 
-  total +=
-  Math.min(info.sharedDirty, info.pss) + info.privateDirty
-  + info.privateClean;
+  // Account for anonymous to know the amount of
+  // memory reclaimable by killing the process
+  total += info.anonymous;
+
   if (LOG.isDebugEnabled()) {
 LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-+ ", SharedDirty : " + info.sharedDirty + ", PSS : "
-+ info.pss + ", Private_Dirty : " + info.privateDirty
-+ ", Private_Clean : " + info.privateClean + ", total : "
-+ (total * KB_TO_BYTES));
++ ", info : " + info.toString()
++ ", total : " + (total * KB_TO_BYTES));
   }
 }
   }
@@ -877,6 +876,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 private int sharedDirty;
 private int privateClean;
 private int privateDirty;
+private int anonymous;
 private int referenced;
 private String regionName;
 private String permission;
@@ -929,6 +929,10 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   return referenced;
 }
 
+public int getAnonymous() {
+  return anonymous;
+}
+
 public void setMemInfo(String key, String value) {
   MemInfo info = MemInfo.getMemInfoByName(key);
   int val = 0;
@@ -969,6 +973,9 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   case REFERENCED:
 referenced = val;
 break;
+  case ANONYMOUS:
+anonymous = val;
+break;
   default:
 break;
   }
@@ -999,10 +1006,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 .append(MemInfo.REFERENCED.name + ":" + this.getReferenced())
 .append(" kB\n");
   sb.append("\t")
-.append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
-.append(" kB\n");
-  sb.append("\t")
-.append(MemInfo.PRIVATE_DIRTY.name + ":" + this.getPrivateDirty())
+.append(MemInfo.ANONYMOUS.name + ":" + this.getAnonymous())
 .append(" kB\n");
   return sb.toString();
 }
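
The accounting change above keys reclaimable memory off the smaps Anonymous field alone, rather than combining shared and private dirty pages. A self-contained sketch of that idea, independent of the ProcfsBasedProcessTree internals; the sample smaps text is illustrative:

    public class AnonymousRssExample {
      /** Sum the Anonymous: lines of a /proc/pid/smaps dump, in kB. */
      static long anonymousKb(String smaps) {
        long totalKb = 0;
        for (String line : smaps.split("\n")) {
          if (line.startsWith("Anonymous:")) {
            // Lines look like "Anonymous:      256 kB".
            totalKb += Long.parseLong(line.trim().split("\\s+")[1]);
          }
        }
        return totalKb;
      }

      public static void main(String[] args) {
        String sample =
            "Rss:                 512 kB\n"
            + "Anonymous:           256 kB\n"
            + "Anonymous:           128 kB\n";
        // Only anonymous pages count toward memory reclaimable by
        // killing the process; file-backed pages are ignored.
        System.out.println(anonymousKb(sample) + " kB");
      }
    }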

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecb51b85/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index fa4e8c8..841d333 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 
