[hadoop] branch branch-3.3 updated: HADOOP-17159. Make UGI support forceful relogin from keytab ignoring the last login time (#2249)

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new fcb80c1  HADOOP-17159. Make UGI support forceful relogin from keytab 
ignoring the last login time (#2249)
fcb80c1 is described below

commit fcb80c1ade5162b323b7138984f19af673a29ebd
Author: sguggilam 
AuthorDate: Wed Aug 26 23:45:21 2020 -0700

HADOOP-17159. Make UGI support forceful relogin from keytab ignoring the 
last login time (#2249)

Contributed by Sandeep Guggilam.

Signed-off-by: Mingliang Liu 
Signed-off-by: Steve Loughran 
---
 .../hadoop/security/UserGroupInformation.java  | 36 ++
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 ++
 2 files changed, 66 insertions(+), 6 deletions(-)
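
For orientation, a minimal usage sketch of the new API (principal and keytab
path are hypothetical; forceReloginFromKeytab() is the only new call, and it
skips the minimum-elapsed-time check that reloginFromKeytab() applies):

    // Sketch only: assumes a keytab login has already happened, as the Javadoc requires.
    UserGroupInformation.loginUserFromKeytab(
        "foo@EXAMPLE.COM", "/etc/security/keytabs/foo.keytab");  // hypothetical values
    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    // reloginFromKeytab() may be a no-op if too little time has passed since
    // the last login; forceReloginFromKeytab() always performs the relogin.
    ugi.forceReloginFromKeytab();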

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index d37da72..dcee9f4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1233,7 +1233,29 @@ public class UserGroupInformation {
     reloginFromKeytab(false);
   }
 
+  /**
+   * Force re-login a user from a keytab file irrespective of the last login
+   * time. Loads a user identity from a keytab file and logs them in. They
+   * become the currently logged-in user. This method assumes that
+   * {@link #loginUserFromKeytab(String, String)} has happened already. The
+   * Subject field of this UserGroupInformation object is updated to have the
+   * new credentials.
+   *
+   * @throws IOException
+   * @throws KerberosAuthException on a failure
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public void forceReloginFromKeytab() throws IOException {
+    reloginFromKeytab(false, true);
+  }
+
   private void reloginFromKeytab(boolean checkTGT) throws IOException {
+    reloginFromKeytab(checkTGT, false);
+  }
+
+  private void reloginFromKeytab(boolean checkTGT, boolean ignoreLastLoginTime)
+      throws IOException {
     if (!shouldRelogin() || !isFromKeytab()) {
       return;
     }
@@ -1248,7 +1270,7 @@ public class UserGroupInformation {
         return;
       }
     }
-    relogin(login);
+    relogin(login, ignoreLastLoginTime);
   }
 
   /**
@@ -1269,25 +1291,27 @@ public class UserGroupInformation {
     if (login == null) {
       throw new KerberosAuthException(MUST_FIRST_LOGIN);
     }
-    relogin(login);
+    relogin(login, false);
   }
 
-  private void relogin(HadoopLoginContext login) throws IOException {
+  private void relogin(HadoopLoginContext login, boolean ignoreLastLoginTime)
+      throws IOException {
     // ensure the relogin is atomic to avoid leaving credentials in an
     // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
     // from accessing or altering credentials during the relogin.
     synchronized(login.getSubjectLock()) {
       // another racing thread may have beat us to the relogin.
       if (login == getLogin()) {
-        unprotectedRelogin(login);
+        unprotectedRelogin(login, ignoreLastLoginTime);
       }
     }
   }
 
-  private void unprotectedRelogin(HadoopLoginContext login) throws IOException {
+  private void unprotectedRelogin(HadoopLoginContext login,
+      boolean ignoreLastLoginTime) throws IOException {
     assert Thread.holdsLock(login.getSubjectLock());
     long now = Time.now();
-    if (!hasSufficientTimeElapsed(now)) {
+    if (!hasSufficientTimeElapsed(now) && !ignoreLastLoginTime) {
       return;
     }
     // register most recent relogin attempt
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index d233234..db0095f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -158,6 +158,42 @@ public class TestUGILoginFromKeytab {
     Assert.assertNotSame(login1, login2);
   }
 
+  /**
+   * Force re-login from keytab using the MiniKDC and verify the UGI can
+   * successfully relogin from keytab as well.
+   */
+  @Test
+  public void testUGIForceReLoginFromKeytab() throws Exception {
+    // Set this to false as we are testing force re-login anyways
+    UserGroupInformation.setShouldRenewImmediatelyForTests(false);
+    String principal = "foo";
+    File keytab = new File(workDir, "foo.keytab");
+    kdc.createP

[hadoop] branch trunk updated (2ffe00f -> d8aaa8c)

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 2ffe00f  HDFS-15540. Directories protected from delete can still be 
moved to the trash. Contributed by Stephen O'Donnell.
 add d8aaa8c  HADOOP-17159. Make UGI support forceful relogin from keytab 
ignoring the last login time (#2249)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/security/UserGroupInformation.java  | 36 ++
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 ++
 2 files changed, 66 insertions(+), 6 deletions(-)





[hadoop] branch branch-3.3 updated (ee7d214 -> a9ce600)

2020-08-26 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ee7d214  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"
 new 9eaa352  HDFS-15243. Add an option to prevent sub-directories of 
protected directories from deletion. Contributed by liuyanyu.
 new a9ce600  HDFS-15540. Directories protected from delete can still be 
moved to the trash. Contributed by Stephen O'Donnell.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  10 ++
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  |  12 ++
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |   5 +
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  10 ++
 .../src/main/resources/hdfs-default.xml|   8 ++
 .../server/namenode/TestProtectedDirectories.java  | 151 +
 6 files changed, 196 insertions(+)
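
Taken together, a minimal configuration sketch for the two changes (the path
value is hypothetical; "fs.protected.directories" is the pre-existing key this
feature extends):

    Configuration conf = new HdfsConfiguration();
    // Pre-existing key: directories that must not be deleted while non-empty.
    conf.set("fs.protected.directories", "/data/critical");  // hypothetical path
    // New key from HDFS-15243: also protect sub-directories of those directories.
    conf.setBoolean(DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);

With HDFS-15540 applied on top, moving such a directory to the trash (which is
a rename) is rejected just like a direct delete.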





[hadoop] 02/02: HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.

2020-08-26 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit a9ce6001eaf3b94ca15ccdb0330784a078b9761d
Author: Stephen O'Donnell 
AuthorDate: Wed Aug 26 23:04:56 2020 -0700

HDFS-15540. Directories protected from delete can still be moved to the 
trash. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
(cherry picked from commit 2ffe00fc46aa74929e722dc1804fb0b3d48ee7a9)
---
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |  5 ++
 .../server/namenode/TestProtectedDirectories.java  | 70 ++
 2 files changed, 75 insertions(+)
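
The fix routes renames through the same protected-descendant check as deletes,
which closes the trash loophole. A hypothetical illustration with the Trash API
that the new test exercises (assumes a running MiniDFSCluster):

    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);  // enable trash, as in the test
    FileSystem fs = cluster.getFileSystem();
    // Previously this rename into the trash succeeded for a protected, non-empty
    // directory; with the fix it fails with an AccessControlException.
    Trash.moveToAppropriateTrash(fs, new Path("/data/critical"), conf);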

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 602f996..423f3a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -262,6 +262,11 @@ class FSDirRenameOp {
       throws IOException {
     final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
     final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK);
+
+    if(fsd.isNonEmptyDirectory(srcIIP)) {
+      DFSUtil.checkProtectedDescendants(fsd, srcIIP);
+    }
+
     if (fsd.isPermissionEnabled()) {
       boolean renameToTrash = false;
       if (null != options &&
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index c15af55..e5f2631 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
@@ -36,6 +38,7 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.*;
 
@@ -284,6 +287,31 @@ public class TestProtectedDirectories {
     }
   }
 
+  @Test
+  public void testMoveToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether " + path +
+                  " can be moved to trash",
+              moveToTrash(fs, path, conf),
+              is(testMatrixEntry.canPathBeDeleted(path)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /*
    * Verify that protected directories could not be renamed.
    */
@@ -340,6 +368,33 @@ public class TestProtectedDirectories {
   }
 
   @Test
+  public void testMoveProtectedSubDirsToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry :
+        createTestMatrixForProtectSubDirs()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether "
+                  + srcPath + " can be moved to trash",
+              moveToTrash(fs, srcPath, conf),
+              is(testMatrixEntry.canPathBeRenamed(srcPath)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
   public void testDeleteProtectSubDirs() throws Throwable {
     for (T

[hadoop] 01/02: HDFS-15243. Add an option to prevent sub-directories of protected directories from deletion. Contributed by liuyanyu.

2020-08-26 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9eaa3520e6b38f337743b2f6da15f7e12be3d637
Author: Ayush Saxena 
AuthorDate: Tue May 12 13:11:31 2020 +0530

HDFS-15243. Add an option to prevent sub-directories of protected 
directories from deletion. Contributed by liuyanyu.

(cherry picked from commit 0fe49036e557f210a390e07276f5732bc212ae32)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 10 +++
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  | 12 
 .../hadoop/hdfs/server/namenode/FSDirectory.java   | 10 +++
 .../src/main/resources/hdfs-default.xml|  8 +++
 .../server/namenode/TestProtectedDirectories.java  | 81 ++
 5 files changed, 121 insertions(+)
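
The core of the change is an ancestor walk in DFSUtil.checkProtectedDescendants,
visible in the hunk below. A standalone sketch of that walk, simplified to
return a flag instead of throwing AccessControlException, and assuming src is
an absolute, normalized path (Path is org.apache.hadoop.fs.Path):

    static boolean hasProtectedAncestor(String src, Set<String> protectedDirs) {
      // For src = "/a/b/c", this checks "/a/b" and then "/a".
      while (!src.isEmpty()) {
        int index = src.lastIndexOf(Path.SEPARATOR_CHAR);
        src = src.substring(0, index);   // strip the last path component
        if (protectedDirs.contains(src)) {
          return true;
        }
      }
      return false;
    }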

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e4a710f..4b8c27b 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1471,6 +1471,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.state.context.enabled";
   public static final boolean DFS_NAMENODE_STATE_CONTEXT_ENABLED_DEFAULT = false;
 
+  /**
+   * Whether to protect the subdirectories of directories that
+   * are set in fs.protected.directories.
+   */
+  public static final String DFS_PROTECTED_SUBDIRECTORIES_ENABLE =
+      "dfs.protected.subdirectories.enable";
+  // Default value for DFS_PROTECTED_SUBDIRECTORIES_ENABLE.
+  public static final boolean DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT =
+      false;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 0facfd9..00f14cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1809,6 +1809,18 @@ public class DFSUtil {
             + descendant);
       }
     }
+
+    if (fsd.isProtectedSubDirectoriesEnable()) {
+      while (!src.isEmpty()) {
+        int index = src.lastIndexOf(Path.SEPARATOR_CHAR);
+        src = src.substring(0, index);
+        if (protectedDirs.contains(src)) {
+          throw new AccessControlException(
+              "Cannot delete/rename subdirectory under protected subdirectory "
+                  + src);
+        }
+      }
+    }
   }
 
   /**
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ac976d3..817844f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -89,6 +89,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECI
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
@@ -169,6 +171,7 @@ public class FSDirectory implements Closeable {
   //
   // Each entry in this set must be a normalized path.
   private volatile SortedSet<String> protectedDirectories;
+  private final boolean isProtectedSubDirectoriesEnable;
 
   private final boolean isPermissionEnabled;
   private final boolean isPermissionContentSummarySubAccess;
@@ -381,6 +384,9 @@ public class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
 
     this.protectedDirectories = parseProtectedDirectories(conf);
+    this.isProtectedSubDirectoriesEnable = conf.getBoolean(
+        DFS_PROTECTED_SUBDIRECTORIES_ENABLE,
+        DFS_PROTECTED_SUBDIRECTORIES_ENABLE_DEFAULT);
 
 P

[hadoop] branch trunk updated: HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.

2020-08-26 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2ffe00f  HDFS-15540. Directories protected from delete can still be 
moved to the trash. Contributed by Stephen O'Donnell.
2ffe00f is described below

commit 2ffe00fc46aa74929e722dc1804fb0b3d48ee7a9
Author: Stephen O'Donnell 
AuthorDate: Wed Aug 26 23:04:56 2020 -0700

HDFS-15540. Directories protected from delete can still be moved to the 
trash. Contributed by Stephen O'Donnell.

Signed-off-by: Wei-Chiu Chuang 
---
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |  5 ++
 .../server/namenode/TestProtectedDirectories.java  | 70 ++
 2 files changed, 75 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 7396519..43dd1b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -263,6 +263,11 @@ class FSDirRenameOp {
       throws IOException {
     final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
     final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK);
+
+    if(fsd.isNonEmptyDirectory(srcIIP)) {
+      DFSUtil.checkProtectedDescendants(fsd, srcIIP);
+    }
+
     if (fsd.isPermissionEnabled()) {
       boolean renameToTrash = false;
       if (null != options &&
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index c15af55..e5f2631 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
@@ -36,6 +38,7 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.*;
 
@@ -284,6 +287,31 @@ public class TestProtectedDirectories {
     }
   }
 
+  @Test
+  public void testMoveToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether " + path +
+                  " can be moved to trash",
+              moveToTrash(fs, path, conf),
+              is(testMatrixEntry.canPathBeDeleted(path)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /*
    * Verify that protected directories could not be renamed.
    */
@@ -340,6 +368,33 @@ public class TestProtectedDirectories {
   }
 
   @Test
+  public void testMoveProtectedSubDirsToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry :
+        createTestMatrixForProtectSubDirs()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether "
+                  + srcPath + " can be moved to trash",
+              moveToTrash(fs, srcPath, conf),
+              is(testMatrixEntry.canPathBeRenamed(srcPath)));
+        }

[hadoop] branch trunk updated: HDFS-15510. RBF: Quota and Content Summary was not correct in Multiple Destinations. Contributed by Hemanth Boyina.

2020-08-26 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ca8e7a7  HDFS-15510. RBF: Quota and Content Summary was not correct in 
Multiple Destinations. Contributed by Hemanth Boyina.
ca8e7a7 is described below

commit ca8e7a77256003e11ab7e3d079ee4cf9f50080dd
Author: Takanobu Asanuma 
AuthorDate: Thu Aug 27 12:10:39 2020 +0900

HDFS-15510. RBF: Quota and Content Summary was not correct in Multiple 
Destinations. Contributed by Hemanth Boyina.
---
 .../federation/router/RouterClientProtocol.java|  6 ++-
 ...erRPCMultipleDestinationMountTableResolver.java | 57 ++
 2 files changed, 61 insertions(+), 2 deletions(-)
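
The one-character change from += to = matters because each destination of a
multi-destination mount point reports the same quota that was set once on the
mount entry, so summing double-counts it. A worked example:

    // Hypothetical mount entry with nsQuota = 5 and two destinations (ns0, ns1).
    long[] subclusterQuotas = {5, 5};  // each namespace reports the configured quota
    long before = 0, after = 0;
    for (long q : subclusterQuotas) {
      before += q;  // old aggregation: 5 + 5 = 10, quota appears doubled
      after = q;    // fixed aggregation: 5, the set quota is counted once
    }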

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index e2ec030..fb3eb18 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -1863,6 +1863,8 @@ public class RouterClientProtocol implements ClientProtocol {
 
   /**
    * Aggregate content summaries for each subcluster.
+   * If the mount point has multiple destinations,
+   * add the quota set value only once.
    *
    * @param summaries Collection of individual summaries.
    * @return Aggregated content summary.
@@ -1885,9 +1887,9 @@ public class RouterClientProtocol implements ClientProtocol {
       length += summary.getLength();
       fileCount += summary.getFileCount();
       directoryCount += summary.getDirectoryCount();
-      quota += summary.getQuota();
+      quota = summary.getQuota();
       spaceConsumed += summary.getSpaceConsumed();
-      spaceQuota += summary.getSpaceQuota();
+      spaceQuota = summary.getSpaceQuota();
       // We return from the first response as we assume that the EC policy
       // of each sub-cluster is same.
       if (ecPolicy.isEmpty()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
index 6ebc311..ebb62d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
@@ -583,6 +583,63 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
     assertEquals(-1, cs1.getSpaceQuota());
   }
 
+  @Test
+  public void testContentSummaryWithMultipleDest() throws Exception {
+    MountTable addEntry;
+    long nsQuota = 5;
+    long ssQuota = 100;
+    Path path = new Path("/testContentSummaryWithMultipleDest");
+    Map<String, String> destMap = new HashMap<>();
+    destMap.put("ns0", "/testContentSummaryWithMultipleDest");
+    destMap.put("ns1", "/testContentSummaryWithMultipleDest");
+    nnFs0.mkdirs(path);
+    nnFs1.mkdirs(path);
+    addEntry =
+        MountTable.newInstance("/testContentSummaryWithMultipleDest", destMap);
+    addEntry.setQuota(
+        new RouterQuotaUsage.Builder().quota(nsQuota).spaceQuota(ssQuota)
+            .build());
+    assertTrue(addMountTable(addEntry));
+    RouterQuotaUpdateService updateService =
+        routerContext.getRouter().getQuotaCacheUpdateService();
+    updateService.periodicInvoke();
+    ContentSummary cs = routerFs.getContentSummary(path);
+    assertEquals(nsQuota, cs.getQuota());
+    assertEquals(ssQuota, cs.getSpaceQuota());
+    ContentSummary ns0Cs = nnFs0.getContentSummary(path);
+    assertEquals(nsQuota, ns0Cs.getQuota());
+    assertEquals(ssQuota, ns0Cs.getSpaceQuota());
+    ContentSummary ns1Cs = nnFs1.getContentSummary(path);
+    assertEquals(nsQuota, ns1Cs.getQuota());
+    assertEquals(ssQuota, ns1Cs.getSpaceQuota());
+  }
+
+  @Test
+  public void testContentSummaryMultipleDestWithMaxValue()
+      throws Exception {
+    MountTable addEntry;
+    long nsQuota = Long.MAX_VALUE - 2;
+    long ssQuota = Long.MAX_VALUE - 2;
+    Path path = new Path("/testContentSummaryMultipleDestWithMaxValue");
+    Map<String, String> destMap = new HashMap<>();
+    destMap.put("ns0", "/testContentSummaryMultipleDestWithMaxValue");
+    destMap.put("ns1", "/testContentSummaryMultipleDestWithMaxValue");
+    nnFs0.mkdirs(path);
+    nnFs1.mkdirs(path);
+    addEntry = MountTable
+  

[hadoop] branch branch-3.1 updated: Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation class (#2197)"

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 9f94c9e  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"
9f94c9e is described below

commit 9f94c9e60dc5e663774c6bd3ef601b4d38039377
Author: Mingliang Liu 
AuthorDate: Wed Aug 26 11:24:03 2020 -0700

Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation 
class (#2197)"

This reverts commit 12fb9e0600f665aca3e7ebe0be9b95ff232d520f.
---
 .../hadoop/security/UserGroupInformation.java  | 35 +
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 --
 2 files changed, 7 insertions(+), 64 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 2471e0a..0e4168c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1115,26 +1115,7 @@ public class UserGroupInformation {
 reloginFromKeytab(false);
   }
 
-  /**
-   * Force re-Login a user in from a keytab file. Loads a user identity from a
-   * keytab file and logs them in. They become the currently logged-in user.
-   * This method assumes that {@link #loginUserFromKeytab(String, String)} had
-   * happened already. The Subject field of this UserGroupInformation object is
-   * updated to have the new credentials.
-   *
-   * @param ignoreTimeElapsed Force re-login irrespective of the time of last
-   *  login
-   * @throws IOException
-   * @throws KerberosAuthException on a failure
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public void reloginFromKeytab(boolean ignoreTimeElapsed) throws IOException {
-reloginFromKeytab(false, ignoreTimeElapsed);
-  }
-
-  private void reloginFromKeytab(boolean checkTGT, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void reloginFromKeytab(boolean checkTGT) throws IOException {
 if (!shouldRelogin() || !isFromKeytab()) {
   return;
 }
@@ -1149,7 +1130,7 @@ public class UserGroupInformation {
 return;
   }
 }
-relogin(login, ignoreTimeElapsed);
+relogin(login);
   }
 
   /**
@@ -1170,27 +1151,25 @@ public class UserGroupInformation {
 if (login == null) {
   throw new KerberosAuthException(MUST_FIRST_LOGIN);
 }
-relogin(login, false);
+relogin(login);
   }
 
-  private void relogin(HadoopLoginContext login, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void relogin(HadoopLoginContext login) throws IOException {
 // ensure the relogin is atomic to avoid leaving credentials in an
 // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
 // from accessing or altering credentials during the relogin.
 synchronized(login.getSubjectLock()) {
   // another racing thread may have beat us to the relogin.
   if (login == getLogin()) {
-unprotectedRelogin(login, ignoreTimeElapsed);
+unprotectedRelogin(login);
   }
 }
   }
 
-  private void unprotectedRelogin(HadoopLoginContext login,
-  boolean ignoreTimeElapsed) throws IOException {
+  private void unprotectedRelogin(HadoopLoginContext login) throws IOException 
{
 assert Thread.holdsLock(login.getSubjectLock());
 long now = Time.now();
-if (!hasSufficientTimeElapsed(now) && !ignoreTimeElapsed) {
+if (!hasSufficientTimeElapsed(now)) {
   return;
 }
 // register most recent relogin attempt
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index 7e2c250d..bf4a2cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -154,42 +154,6 @@ public class TestUGILoginFromKeytab {
 Assert.assertNotSame(login1, login2);
   }
 
-  /**
-   * Force re-login from keytab using the MiniKDC and verify the UGI can
-   * successfully relogin from keytab as well.
-   */
-  @Test
-  public void testUGIForceReLoginFromKeytab() throws Exception {
-// Set this to false as we are testing force re-login anyways
-UserGroupInformation.setShouldRenewImmediatelyForTests(false);
-String principal = "foo";
-File keytab = new File(workDir, "foo.keytab");
-kdc.createPrincipal(k

[hadoop] branch branch-3.2 updated: Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation class (#2197)"

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new acec431  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"
acec431 is described below

commit acec4313777d4c13f151ecd286cf2e88c5d44d9e
Author: Mingliang Liu 
AuthorDate: Wed Aug 26 11:23:26 2020 -0700

Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation 
class (#2197)"

This reverts commit d06f0de3affbd5e8232a6fcdb9a3c396934b6a05.
---
 .../hadoop/security/UserGroupInformation.java  | 35 +
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 --
 2 files changed, 7 insertions(+), 64 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index c91cf73..11f91f2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1116,26 +1116,7 @@ public class UserGroupInformation {
 reloginFromKeytab(false);
   }
 
-  /**
-   * Force re-Login a user in from a keytab file. Loads a user identity from a
-   * keytab file and logs them in. They become the currently logged-in user.
-   * This method assumes that {@link #loginUserFromKeytab(String, String)} had
-   * happened already. The Subject field of this UserGroupInformation object is
-   * updated to have the new credentials.
-   *
-   * @param ignoreTimeElapsed Force re-login irrespective of the time of last
-   *  login
-   * @throws IOException
-   * @throws KerberosAuthException on a failure
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public void reloginFromKeytab(boolean ignoreTimeElapsed) throws IOException {
-reloginFromKeytab(false, ignoreTimeElapsed);
-  }
-
-  private void reloginFromKeytab(boolean checkTGT, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void reloginFromKeytab(boolean checkTGT) throws IOException {
 if (!shouldRelogin() || !isFromKeytab()) {
   return;
 }
@@ -1150,7 +1131,7 @@ public class UserGroupInformation {
 return;
   }
 }
-relogin(login, ignoreTimeElapsed);
+relogin(login);
   }
 
   /**
@@ -1171,27 +1152,25 @@ public class UserGroupInformation {
 if (login == null) {
   throw new KerberosAuthException(MUST_FIRST_LOGIN);
 }
-relogin(login, false);
+relogin(login);
   }
 
-  private void relogin(HadoopLoginContext login, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void relogin(HadoopLoginContext login) throws IOException {
 // ensure the relogin is atomic to avoid leaving credentials in an
 // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
 // from accessing or altering credentials during the relogin.
 synchronized(login.getSubjectLock()) {
   // another racing thread may have beat us to the relogin.
   if (login == getLogin()) {
-unprotectedRelogin(login, ignoreTimeElapsed);
+unprotectedRelogin(login);
   }
 }
   }
 
-  private void unprotectedRelogin(HadoopLoginContext login,
-  boolean ignoreTimeElapsed) throws IOException {
+  private void unprotectedRelogin(HadoopLoginContext login) throws IOException 
{
 assert Thread.holdsLock(login.getSubjectLock());
 long now = Time.now();
-if (!hasSufficientTimeElapsed(now) && !ignoreTimeElapsed) {
+if (!hasSufficientTimeElapsed(now)) {
   return;
 }
 // register most recent relogin attempt
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index 7e2c250d..bf4a2cc 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -154,42 +154,6 @@ public class TestUGILoginFromKeytab {
 Assert.assertNotSame(login1, login2);
   }
 
-  /**
-   * Force re-login from keytab using the MiniKDC and verify the UGI can
-   * successfully relogin from keytab as well.
-   */
-  @Test
-  public void testUGIForceReLoginFromKeytab() throws Exception {
-// Set this to false as we are testing force re-login anyways
-UserGroupInformation.setShouldRenewImmediatelyForTests(false);
-String principal = "foo";
-File keytab = new File(workDir, "foo.keytab");
-kdc.createPrincipal(k

[hadoop] branch branch-3.3 updated: Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation class (#2197)"

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new ee7d214  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"
ee7d214 is described below

commit ee7d21411869ec18f620615b9e62caa5add72a1d
Author: Mingliang Liu 
AuthorDate: Wed Aug 26 11:22:46 2020 -0700

Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation 
class (#2197)"

This reverts commit da129a67bb4a169d3efcfc7cf298af68bad5fb73.
---
 .../hadoop/security/UserGroupInformation.java  | 35 +
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 --
 2 files changed, 7 insertions(+), 64 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 57a4c74..d37da72 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1233,26 +1233,7 @@ public class UserGroupInformation {
 reloginFromKeytab(false);
   }
 
-  /**
-   * Force re-Login a user in from a keytab file. Loads a user identity from a
-   * keytab file and logs them in. They become the currently logged-in user.
-   * This method assumes that {@link #loginUserFromKeytab(String, String)} had
-   * happened already. The Subject field of this UserGroupInformation object is
-   * updated to have the new credentials.
-   *
-   * @param ignoreTimeElapsed Force re-login irrespective of the time of last
-   *  login
-   * @throws IOException
-   * @throws KerberosAuthException on a failure
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public void reloginFromKeytab(boolean ignoreTimeElapsed) throws IOException {
-reloginFromKeytab(false, ignoreTimeElapsed);
-  }
-
-  private void reloginFromKeytab(boolean checkTGT, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void reloginFromKeytab(boolean checkTGT) throws IOException {
 if (!shouldRelogin() || !isFromKeytab()) {
   return;
 }
@@ -1267,7 +1248,7 @@ public class UserGroupInformation {
 return;
   }
 }
-relogin(login, ignoreTimeElapsed);
+relogin(login);
   }
 
   /**
@@ -1288,27 +1269,25 @@ public class UserGroupInformation {
 if (login == null) {
   throw new KerberosAuthException(MUST_FIRST_LOGIN);
 }
-relogin(login, false);
+relogin(login);
   }
 
-  private void relogin(HadoopLoginContext login, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void relogin(HadoopLoginContext login) throws IOException {
 // ensure the relogin is atomic to avoid leaving credentials in an
 // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
 // from accessing or altering credentials during the relogin.
 synchronized(login.getSubjectLock()) {
   // another racing thread may have beat us to the relogin.
   if (login == getLogin()) {
-unprotectedRelogin(login, ignoreTimeElapsed);
+unprotectedRelogin(login);
   }
 }
   }
 
-  private void unprotectedRelogin(HadoopLoginContext login,
-  boolean ignoreTimeElapsed) throws IOException {
+  private void unprotectedRelogin(HadoopLoginContext login) throws IOException 
{
 assert Thread.holdsLock(login.getSubjectLock());
 long now = Time.now();
-if (!hasSufficientTimeElapsed(now) && !ignoreTimeElapsed) {
+if (!hasSufficientTimeElapsed(now)) {
   return;
 }
 // register most recent relogin attempt
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index bf4cf75..d233234 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -158,42 +158,6 @@ public class TestUGILoginFromKeytab {
 Assert.assertNotSame(login1, login2);
   }
 
-  /**
-   * Force re-login from keytab using the MiniKDC and verify the UGI can
-   * successfully relogin from keytab as well.
-   */
-  @Test
-  public void testUGIForceReLoginFromKeytab() throws Exception {
-// Set this to false as we are testing force re-login anyways
-UserGroupInformation.setShouldRenewImmediatelyForTests(false);
-String principal = "foo";
-File keytab = new File(workDir, "foo.keytab");
-kdc.createPrincipal(ke

[hadoop] branch branch-3.2 updated: HDFS-14096. [SPS] : Add Support for Storage Policy Satisfier in ViewFs. Contributed by Ayush Saxena.

2020-08-26 Thread umamahesh
This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 3bacea2  HDFS-14096. [SPS] : Add Support for Storage Policy Satisfier 
in ViewFs. Contributed by Ayush Saxena.
3bacea2 is described below

commit 3bacea2e5ebd330964bfca4c1064d8f07d09112d
Author: Surendra Singh Lilhore 
AuthorDate: Mon Dec 17 11:24:57 2018 +0530

HDFS-14096. [SPS] : Add Support for Storage Policy Satisfier in ViewFs. 
Contributed by Ayush Saxena.

(cherry picked from commit 788e7473a404fa074b3af522416ee3d2fae865a0)
---
 .../java/org/apache/hadoop/fs/AbstractFileSystem.java  | 10 ++
 .../main/java/org/apache/hadoop/fs/FileContext.java| 18 ++
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java | 10 ++
 .../java/org/apache/hadoop/fs/FilterFileSystem.java|  5 +
 .../src/main/java/org/apache/hadoop/fs/FilterFs.java   |  5 +
 .../apache/hadoop/fs/viewfs/ChRootedFileSystem.java|  5 +
 .../java/org/apache/hadoop/fs/viewfs/ChRootedFs.java   |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java| 13 +
 .../main/java/org/apache/hadoop/fs/viewfs/ViewFs.java  | 12 
 .../java/org/apache/hadoop/fs/TestHarFileSystem.java   |  2 ++
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   |  5 +
 .../src/main/java/org/apache/hadoop/fs/Hdfs.java   |  5 +
 .../org/apache/hadoop/hdfs/DistributedFileSystem.java  |  6 +-
 13 files changed, 96 insertions(+), 5 deletions(-)
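
A minimal sketch of the API being threaded through ViewFs here (path and policy
name are hypothetical; setStoragePolicy is pre-existing, and satisfyStoragePolicy
asks the Storage Policy Satisfier to move blocks accordingly):

    FileSystem fs = FileSystem.get(conf);   // may now be a viewfs:// file system
    Path dir = new Path("/cold/archive");   // hypothetical directory
    fs.setStoragePolicy(dir, "COLD");       // pick the target policy
    fs.satisfyStoragePolicy(dir);           // schedule block moves to satisfy it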

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index c7b21fc..9926a74 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -1250,6 +1250,16 @@ public abstract class AbstractFileSystem implements PathCapabilities {
   }
 
   /**
+   * Set the source path to satisfy storage policy.
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path) throws IOException {
+    throw new UnsupportedOperationException(
+        getClass().getSimpleName() + " doesn't support satisfyStoragePolicy");
+  }
+
+  /**
    * Set the storage policy for a given file or directory.
    *
    * @param path file or directory path.
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index ace892d..4357c88 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -2778,6 +2778,24 @@ public class FileContext implements PathCapabilities {
   }
 
   /**
+   * Set the source path to satisfy storage policy.
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path)
+      throws IOException {
+    final Path absF = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        fs.satisfyStoragePolicy(path);
+        return null;
+      }
+    }.resolve(this, absF);
+  }
+
+  /**
    * Set the storage policy for a given file or directory.
    *
    * @param path file or directory path.
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index bac398b..22586b2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3116,6 +3116,16 @@ public abstract class FileSystem extends Configured
   }
 
   /**
+   * Set the source path to satisfy storage policy.
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path) throws IOException {
+    throw new UnsupportedOperationException(
+        getClass().getSimpleName() + " doesn't support satisfyStoragePolicy");
+  }
+
+  /**
    * Set the storage policy for a given file or directory.
    *
    * @param src file or directory path.
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src

[hadoop] branch revert-2197-trunk created (now b7745b0)

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a change to branch revert-2197-trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at b7745b0  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"

This branch includes the following new commits:

 new b7745b0  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.






[hadoop] 01/01: Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation class (#2197)"

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a commit to branch revert-2197-trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b7745b00b2810fd405e19971ac8da27ef1668b01
Author: Mingliang Liu 
AuthorDate: Wed Aug 26 10:41:00 2020 -0700

Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation 
class (#2197)"

This reverts commit a932796d0cad3d84df0003782e4247cbc2dcca93.
---
 .../hadoop/security/UserGroupInformation.java  | 35 +
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 --
 2 files changed, 7 insertions(+), 64 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index d1ab436..91b64ad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1232,26 +1232,7 @@ public class UserGroupInformation {
 reloginFromKeytab(false);
   }
 
-  /**
-   * Force re-Login a user in from a keytab file. Loads a user identity from a
-   * keytab file and logs them in. They become the currently logged-in user.
-   * This method assumes that {@link #loginUserFromKeytab(String, String)} had
-   * happened already. The Subject field of this UserGroupInformation object is
-   * updated to have the new credentials.
-   *
-   * @param ignoreTimeElapsed Force re-login irrespective of the time of last
-   *  login
-   * @throws IOException
-   * @throws KerberosAuthException on a failure
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public void reloginFromKeytab(boolean ignoreTimeElapsed) throws IOException {
-reloginFromKeytab(false, ignoreTimeElapsed);
-  }
-
-  private void reloginFromKeytab(boolean checkTGT, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void reloginFromKeytab(boolean checkTGT) throws IOException {
 if (!shouldRelogin() || !isFromKeytab()) {
   return;
 }
@@ -1266,7 +1247,7 @@ public class UserGroupInformation {
 return;
   }
 }
-relogin(login, ignoreTimeElapsed);
+relogin(login);
   }
 
   /**
@@ -1287,27 +1268,25 @@ public class UserGroupInformation {
 if (login == null) {
   throw new KerberosAuthException(MUST_FIRST_LOGIN);
 }
-relogin(login, false);
+relogin(login);
   }
 
-  private void relogin(HadoopLoginContext login, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void relogin(HadoopLoginContext login) throws IOException {
 // ensure the relogin is atomic to avoid leaving credentials in an
 // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
 // from accessing or altering credentials during the relogin.
 synchronized(login.getSubjectLock()) {
   // another racing thread may have beat us to the relogin.
   if (login == getLogin()) {
-unprotectedRelogin(login, ignoreTimeElapsed);
+unprotectedRelogin(login);
   }
 }
   }
 
-  private void unprotectedRelogin(HadoopLoginContext login,
-  boolean ignoreTimeElapsed) throws IOException {
+  private void unprotectedRelogin(HadoopLoginContext login) throws IOException 
{
 assert Thread.holdsLock(login.getSubjectLock());
 long now = Time.now();
-if (!hasSufficientTimeElapsed(now) && !ignoreTimeElapsed) {
+if (!hasSufficientTimeElapsed(now)) {
   return;
 }
 // register most recent relogin attempt
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index bf4cf75..d233234 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -158,42 +158,6 @@ public class TestUGILoginFromKeytab {
 Assert.assertNotSame(login1, login2);
   }
 
-  /**
-   * Force re-login from keytab using the MiniKDC and verify the UGI can
-   * successfully relogin from keytab as well.
-   */
-  @Test
-  public void testUGIForceReLoginFromKeytab() throws Exception {
-// Set this to false as we are testing force re-login anyways
-UserGroupInformation.setShouldRenewImmediatelyForTests(false);
-String principal = "foo";
-File keytab = new File(workDir, "foo.keytab");
-kdc.createPrincipal(keytab, principal);
-
-UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
-UserGroupInformation ugi = UserGroupInformation.getLoginUser();
-Assert.assertTrue("UGI should be c

[hadoop] branch revert-2197-trunk created (now b7745b0)

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a change to branch revert-2197-trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at b7745b0  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"

This branch includes the following new commits:

 new b7745b0  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"

The 1 revision listed above as "new" is entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-14096. [SPS] : Add Support for Storage Policy Satisfier in ViewFs. Contributed by Ayush Saxena.

2020-08-26 Thread umamahesh
This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 3bacea2  HDFS-14096. [SPS] : Add Support for Storage Policy Satisfier 
in ViewFs. Contributed by Ayush Saxena.
3bacea2 is described below

commit 3bacea2e5ebd330964bfca4c1064d8f07d09112d
Author: Surendra Singh Lilhore 
AuthorDate: Mon Dec 17 11:24:57 2018 +0530

HDFS-14096. [SPS] : Add Support for Storage Policy Satisfier in ViewFs. 
Contributed by Ayush Saxena.

(cherry picked from commit 788e7473a404fa074b3af522416ee3d2fae865a0)
---
 .../java/org/apache/hadoop/fs/AbstractFileSystem.java  | 10 ++
 .../main/java/org/apache/hadoop/fs/FileContext.java| 18 ++
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java | 10 ++
 .../java/org/apache/hadoop/fs/FilterFileSystem.java|  5 +
 .../src/main/java/org/apache/hadoop/fs/FilterFs.java   |  5 +
 .../apache/hadoop/fs/viewfs/ChRootedFileSystem.java|  5 +
 .../java/org/apache/hadoop/fs/viewfs/ChRootedFs.java   |  5 +
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java| 13 +
 .../main/java/org/apache/hadoop/fs/viewfs/ViewFs.java  | 12 
 .../java/org/apache/hadoop/fs/TestHarFileSystem.java   |  2 ++
 .../hadoop/fs/viewfs/ViewFileSystemBaseTest.java   |  5 +
 .../src/main/java/org/apache/hadoop/fs/Hdfs.java   |  5 +
 .../org/apache/hadoop/hdfs/DistributedFileSystem.java  |  6 +-
 13 files changed, 96 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index c7b21fc..9926a74 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -1250,6 +1250,16 @@ public abstract class AbstractFileSystem implements 
PathCapabilities {
   }
 
   /**
+   * Set the source path to satisfy storage policy.
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path) throws IOException {
+throw new UnsupportedOperationException(
+getClass().getSimpleName() + " doesn't support satisfyStoragePolicy");
+  }
+
+  /**
* Set the storage policy for a given file or directory.
*
* @param path file or directory path.
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index ace892d..4357c88 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -2778,6 +2778,24 @@ public class FileContext implements PathCapabilities {
   }
 
   /**
+   * Set the source path to satisfy storage policy.
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path)
+  throws IOException {
+final Path absF = fixRelativePart(path);
+new FSLinkResolver<Void>() {
+  @Override
+  public Void next(final AbstractFileSystem fs, final Path p)
+  throws IOException {
+fs.satisfyStoragePolicy(p);
+return null;
+  }
+}.resolve(this, absF);
+  }
+
+  /**
* Set the storage policy for a given file or directory.
*
* @param path file or directory path.
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index bac398b..22586b2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -3116,6 +3116,16 @@ public abstract class FileSystem extends Configured
   }
 
   /**
+   * Set the source path to satisfy storage policy.
+   * @param path The source path referring to either a directory or a file.
+   * @throws IOException
+   */
+  public void satisfyStoragePolicy(final Path path) throws IOException {
+throw new UnsupportedOperationException(
+getClass().getSimpleName() + " doesn't support satisfyStoragePolicy");
+  }
+
+  /**
* Set the storage policy for a given file or directory.
*
* @param src file or directory path.
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src
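
As a usage illustration (not part of the patch), a client could drive the new API as below; the path and policy name are hypothetical, and the base FileSystem implementation throws UnsupportedOperationException unless the concrete filesystem overrides it, as DistributedFileSystem does per the diff stat above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class SatisfyPolicySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/data/cold");   // hypothetical path
    fs.setStoragePolicy(dir, "COLD");    // assumes the COLD policy is defined
    // Ask the Storage Policy Satisfier to move already-written blocks so that
    // they match the policy just set on the path.
    fs.satisfyStoragePolicy(dir);
  }
}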

[hadoop] 01/01: Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation class (#2197)"

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a commit to branch revert-2197-trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b7745b00b2810fd405e19971ac8da27ef1668b01
Author: Mingliang Liu 
AuthorDate: Wed Aug 26 10:41:00 2020 -0700

Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation 
class (#2197)"

This reverts commit a932796d0cad3d84df0003782e4247cbc2dcca93.
---
 .../hadoop/security/UserGroupInformation.java  | 35 +
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 --
 2 files changed, 7 insertions(+), 64 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index d1ab436..91b64ad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1232,26 +1232,7 @@ public class UserGroupInformation {
 reloginFromKeytab(false);
   }
 
-  /**
-   * Force re-Login a user in from a keytab file. Loads a user identity from a
-   * keytab file and logs them in. They become the currently logged-in user.
-   * This method assumes that {@link #loginUserFromKeytab(String, String)} had
-   * happened already. The Subject field of this UserGroupInformation object is
-   * updated to have the new credentials.
-   *
-   * @param ignoreTimeElapsed Force re-login irrespective of the time of last
-   *  login
-   * @throws IOException
-   * @throws KerberosAuthException on a failure
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public void reloginFromKeytab(boolean ignoreTimeElapsed) throws IOException {
-reloginFromKeytab(false, ignoreTimeElapsed);
-  }
-
-  private void reloginFromKeytab(boolean checkTGT, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void reloginFromKeytab(boolean checkTGT) throws IOException {
 if (!shouldRelogin() || !isFromKeytab()) {
   return;
 }
@@ -1266,7 +1247,7 @@ public class UserGroupInformation {
 return;
   }
 }
-relogin(login, ignoreTimeElapsed);
+relogin(login);
   }
 
   /**
@@ -1287,27 +1268,25 @@ public class UserGroupInformation {
 if (login == null) {
   throw new KerberosAuthException(MUST_FIRST_LOGIN);
 }
-relogin(login, false);
+relogin(login);
   }
 
-  private void relogin(HadoopLoginContext login, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void relogin(HadoopLoginContext login) throws IOException {
 // ensure the relogin is atomic to avoid leaving credentials in an
 // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
 // from accessing or altering credentials during the relogin.
 synchronized(login.getSubjectLock()) {
   // another racing thread may have beat us to the relogin.
   if (login == getLogin()) {
-unprotectedRelogin(login, ignoreTimeElapsed);
+unprotectedRelogin(login);
   }
 }
   }
 
-  private void unprotectedRelogin(HadoopLoginContext login,
-  boolean ignoreTimeElapsed) throws IOException {
+  private void unprotectedRelogin(HadoopLoginContext login) throws IOException {
 assert Thread.holdsLock(login.getSubjectLock());
 long now = Time.now();
-if (!hasSufficientTimeElapsed(now) && !ignoreTimeElapsed) {
+if (!hasSufficientTimeElapsed(now)) {
   return;
 }
 // register most recent relogin attempt
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index bf4cf75..d233234 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -158,42 +158,6 @@ public class TestUGILoginFromKeytab {
 Assert.assertNotSame(login1, login2);
   }
 
-  /**
-   * Force re-login from keytab using the MiniKDC and verify the UGI can
-   * successfully relogin from keytab as well.
-   */
-  @Test
-  public void testUGIForceReLoginFromKeytab() throws Exception {
-// Set this to false as we are testing force re-login anyways
-UserGroupInformation.setShouldRenewImmediatelyForTests(false);
-String principal = "foo";
-File keytab = new File(workDir, "foo.keytab");
-kdc.createPrincipal(keytab, principal);
-
-UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
-UserGroupInformation ugi = UserGroupInformation.getLoginUser();
-Assert.assertTrue("UGI should be c


[hadoop] branch trunk updated: Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation class (#2197)"

2020-08-26 Thread liuml07
This is an automated email from the ASF dual-hosted git repository.

liuml07 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5e52955  Revert "HADOOP-17159 Ability for forceful relogin in 
UserGroupInformation class (#2197)"
5e52955 is described below

commit 5e52955112a3151bb608e092f31fc5084de78705
Author: Mingliang Liu 
AuthorDate: Wed Aug 26 10:41:10 2020 -0700

Revert "HADOOP-17159 Ability for forceful relogin in UserGroupInformation 
class (#2197)"

This reverts commit a932796d0cad3d84df0003782e4247cbc2dcca93.
---
 .../hadoop/security/UserGroupInformation.java  | 35 +
 .../hadoop/security/TestUGILoginFromKeytab.java| 36 --
 2 files changed, 7 insertions(+), 64 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index d1ab436..91b64ad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1232,26 +1232,7 @@ public class UserGroupInformation {
 reloginFromKeytab(false);
   }
 
-  /**
-   * Force re-Login a user in from a keytab file. Loads a user identity from a
-   * keytab file and logs them in. They become the currently logged-in user.
-   * This method assumes that {@link #loginUserFromKeytab(String, String)} had
-   * happened already. The Subject field of this UserGroupInformation object is
-   * updated to have the new credentials.
-   *
-   * @param ignoreTimeElapsed Force re-login irrespective of the time of last
-   *  login
-   * @throws IOException
-   * @throws KerberosAuthException on a failure
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public void reloginFromKeytab(boolean ignoreTimeElapsed) throws IOException {
-reloginFromKeytab(false, ignoreTimeElapsed);
-  }
-
-  private void reloginFromKeytab(boolean checkTGT, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void reloginFromKeytab(boolean checkTGT) throws IOException {
 if (!shouldRelogin() || !isFromKeytab()) {
   return;
 }
@@ -1266,7 +1247,7 @@ public class UserGroupInformation {
 return;
   }
 }
-relogin(login, ignoreTimeElapsed);
+relogin(login);
   }
 
   /**
@@ -1287,27 +1268,25 @@ public class UserGroupInformation {
 if (login == null) {
   throw new KerberosAuthException(MUST_FIRST_LOGIN);
 }
-relogin(login, false);
+relogin(login);
   }
 
-  private void relogin(HadoopLoginContext login, boolean ignoreTimeElapsed)
-  throws IOException {
+  private void relogin(HadoopLoginContext login) throws IOException {
 // ensure the relogin is atomic to avoid leaving credentials in an
 // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
 // from accessing or altering credentials during the relogin.
 synchronized(login.getSubjectLock()) {
   // another racing thread may have beat us to the relogin.
   if (login == getLogin()) {
-unprotectedRelogin(login, ignoreTimeElapsed);
+unprotectedRelogin(login);
   }
 }
   }
 
-  private void unprotectedRelogin(HadoopLoginContext login,
-  boolean ignoreTimeElapsed) throws IOException {
+  private void unprotectedRelogin(HadoopLoginContext login) throws IOException {
 assert Thread.holdsLock(login.getSubjectLock());
 long now = Time.now();
-if (!hasSufficientTimeElapsed(now) && !ignoreTimeElapsed) {
+if (!hasSufficientTimeElapsed(now)) {
   return;
 }
 // register most recent relogin attempt
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index bf4cf75..d233234 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -158,42 +158,6 @@ public class TestUGILoginFromKeytab {
 Assert.assertNotSame(login1, login2);
   }
 
-  /**
-   * Force re-login from keytab using the MiniKDC and verify the UGI can
-   * successfully relogin from keytab as well.
-   */
-  @Test
-  public void testUGIForceReLoginFromKeytab() throws Exception {
-// Set this to false as we are testing force re-login anyways
-UserGroupInformation.setShouldRenewImmediatelyForTests(false);
-String principal = "foo";
-File keytab = new File(workDir, "foo.keytab");
-kdc.createPrincipal(keytab, prin
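
With the forceful overload reverted, relogin frequency is again bounded by hasSufficientTimeElapsed(). Below is a sketch of tuning that window, assuming the standard configuration key; the principal and keytab path are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public final class ReloginWindowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Shorten the minimum interval between relogin attempts (default 60s).
    conf.setLong("hadoop.kerberos.min.seconds.before.relogin", 5L);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(
        "foo@EXAMPLE.COM", "/etc/security/keytabs/foo.keytab");
    // A second call within the window is silently skipped after this revert.
    UserGroupInformation.getLoginUser().reloginFromKeytab();
  }
}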

[hadoop] branch branch-3.2 updated: HDFS-8631. WebHDFS : Support setQuota. Contributed by Chao Sun. (Backported)

2020-08-26 Thread umamahesh
This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 0512b27  HDFS-8631. WebHDFS : Support setQuota. Contributed by Chao 
Sun. (Backported)
0512b27 is described below

commit 0512b27172b87929428eeac5956dbb2cae4f2a09
Author: Uma Maheswara Rao G 
AuthorDate: Wed Aug 26 09:33:51 2020 -0700

HDFS-8631. WebHDFS : Support setQuota. Contributed by Chao Sun. (Backported)
---
 .../main/java/org/apache/hadoop/fs/FileSystem.java | 43 +++
 .../org/apache/hadoop/fs/TestFilterFileSystem.java |  2 +
 .../org/apache/hadoop/fs/TestHarFileSystem.java|  2 +
 .../apache/hadoop/hdfs/DistributedFileSystem.java  |  2 +
 .../apache/hadoop/hdfs/web/WebHdfsFileSystem.java  | 43 +++
 .../hdfs/web/resources/NameSpaceQuotaParam.java| 44 +++
 .../hadoop/hdfs/web/resources/PutOpParam.java  |  3 +
 .../hdfs/web/resources/StorageSpaceQuotaParam.java | 45 +++
 .../hdfs/web/resources/StorageTypeParam.java   | 37 +
 .../federation/router/RouterWebHdfsMethods.java| 10 +++-
 .../web/resources/NamenodeWebHdfsMethods.java  | 51 ++---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md   | 64 ++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java| 58 
 .../hadoop/hdfs/web/resources/TestParam.java   | 29 ++
 14 files changed, 423 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index ba892ed..bac398b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1778,6 +1778,33 @@ public abstract class FileSystem extends Configured
   }
 
   /**
+   * Set quota for the given {@link Path}.
+   *
+   * @param src the target path to set quota for
+   * @param namespaceQuota the namespace quota (i.e., # of files/directories)
+   *   to set
+   * @param storagespaceQuota the storage space quota to set
+   * @throws IOException IO failure
+   */
+  public void setQuota(Path src, final long namespaceQuota,
+  final long storagespaceQuota) throws IOException {
+methodNotSupported();
+  }
+
+  /**
+   * Set per storage type quota for the given {@link Path}.
+   *
+   * @param src the target path to set storage type quota for
+   * @param type the storage type to set
+   * @param quota the quota to set for the given storage type
+   * @throws IOException IO failure
+   */
+  public void setQuotaByStorageType(Path src, final StorageType type,
+  final long quota) throws IOException {
+methodNotSupported();
+  }
+
+  /**
* The default filter accepts all paths.
*/
   private static final PathFilter DEFAULT_FILTER = new PathFilter() {
@@ -4297,6 +4324,22 @@ public abstract class FileSystem extends Configured
   }
 
   /**
+   * Helper method that throws an {@link UnsupportedOperationException} for the
+   * current {@link FileSystem} method being called.
+   */
+  private void methodNotSupported() {
+// The order of the stacktrace elements is (from top to bottom):
+//   - java.lang.Thread.getStackTrace
+//   - org.apache.hadoop.fs.FileSystem.methodNotSupported
+//   - <the calling FileSystem method>
+// therefore, to find out the current method name, we use the element at
+// index 2.
+String name = Thread.currentThread().getStackTrace()[2].getMethodName();
+throw new UnsupportedOperationException(getClass().getCanonicalName() +
+" does not support method " + name);
+  }
+
+  /**
* Create a Builder to append a file.
* @param path file path.
* @return a {@link FSDataOutputStreamBuilder} to build file append request.
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 7c4dfe5..c16ea87 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -135,6 +135,8 @@ public class TestFilterFileSystem {
 public Path fixRelativePart(Path p);
 public ContentSummary getContentSummary(Path f);
 public QuotaUsage getQuotaUsage(Path f);
+void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
+void setQuotaByStorageType(Path f, StorageType type, long quota);
 StorageStatistics getStorageStatistics();
   }
 
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFile
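
A client-side sketch of the two methods this backport adds (path and limits are illustrative; the base FileSystem versions throw UnsupportedOperationException, so the calls only succeed against an implementation that overrides them, such as DistributedFileSystem or WebHdfsFileSystem per the diff stat above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;

public final class QuotaSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/projects/alpha");   // hypothetical path
    // Cap the subtree at 10,000 names and 1 TiB of storage space.
    fs.setQuota(dir, 10_000L, 1L << 40);
    // Additionally cap SSD usage under the same subtree at 100 GiB.
    fs.setQuotaByStorageType(dir, StorageType.SSD, 100L * (1L << 30));
  }
}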

[hadoop] branch trunk updated: HADOOP-17224. Install Intel ISA-L library in Dockerfile. (#2243)

2020-08-26 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 931adba  HADOOP-17224. Install Intel ISA-L library in Dockerfile. 
(#2243)
931adba is described below

commit 931adbaa1412dcf8dc8679a8305c5146a4fe2821
Author: Takanobu Asanuma 
AuthorDate: Wed Aug 26 23:15:24 2020 +0900

HADOOP-17224. Install Intel ISA-L library in Dockerfile. (#2243)
---
 dev-support/docker/Dockerfile | 20 
 1 file changed, 20 insertions(+)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 2af73eb..c4c6bbf 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -167,6 +167,26 @@ RUN curl -L -s -S \
&& shasum -a 512 /bin/hadolint | \
 awk 
'$1!="734e37c1f6619cbbd86b9b249e69c9af8ee1ea87a2b1ff71dccda412e9dac35e63425225a95d71572091a3f0a11e9a04c2fc25d9e91b840530c26af32b9891ca"
 {exit(1)}'
 
+##
+# Intel ISA-L 2.29.0
+##
+# hadolint ignore=DL3003,DL3008
+RUN mkdir -p /opt/isa-l-src \
+&& apt-get -q update \
+&& apt-get install -y --no-install-recommends automake yasm \
+&& apt-get clean \
+&& curl -L -s -S \
+  https://github.com/intel/isa-l/archive/v2.29.0.tar.gz \
+  -o /opt/isa-l.tar.gz \
+&& tar xzf /opt/isa-l.tar.gz --strip-components 1 -C /opt/isa-l-src \
+&& cd /opt/isa-l-src \
+&& ./autogen.sh \
+&& ./configure \
+&& make \
+&& make install \
+&& cd /root \
+&& rm -rf /opt/isa-l-src
+
 ###
 # Avoid out of memory errors in builds
 ###
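
To confirm that builds produced inside this image actually pick up ISA-L, one can probe the native erasure-coding loader at runtime; a sketch, assuming the ErasureCodeNative helper keeps its current shape:

import org.apache.hadoop.io.erasurecode.ErasureCodeNative;

public final class IsalCheckSketch {
  public static void main(String[] args) {
    if (ErasureCodeNative.isNativeCodeLoaded()) {
      System.out.println("ISA-L coder loaded: "
          + ErasureCodeNative.getLibraryName());
    } else {
      System.out.println("ISA-L coder not loaded: "
          + ErasureCodeNative.getLoadingFailureReason());
    }
  }
}

The hadoop checknative command reports the same information.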


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-1806. Add ThreadDump Option in YARN UI2 to fetch for running containers

2020-08-26 Thread prabhujoseph
This is an automated email from the ASF dual-hosted git repository.

prabhujoseph pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 75db552  YARN-1806. Add ThreadDump Option in YARN UI2 to fetch for 
running containers
75db552 is described below

commit 75db5526b5db2675a7f396715f48733a7ed26acf
Author: Prabhu Joseph 
AuthorDate: Wed Aug 26 13:08:14 2020 +0530

YARN-1806. Add ThreadDump Option in YARN UI2 to fetch for running containers

Contributed by Siddharth Ahuja. Reviewed by Akhil PB.
---
 .../app/adapters/yarn-container-threaddump.js  |  88 +++
 .../webapp/app/adapters/yarn-node-container-log.js |  94 +++
 .../webapp/app/controllers/yarn-app/threaddump.js  | 281 +
 .../src/main/webapp/app/mixins/app-attempt.js  |  22 ++
 .../webapp/app/models/yarn-node-container-log.js   |  23 ++
 .../hadoop-yarn-ui/src/main/webapp/app/router.js   |   1 +
 .../main/webapp/app/routes/yarn-app/threaddump.js  |  68 +
 .../app/serializers/yarn-node-container-log.js |  50 
 .../src/main/webapp/app/templates/yarn-app.hbs |   3 +
 .../webapp/app/templates/yarn-app/threaddump.hbs   | 113 +
 10 files changed, 743 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-threaddump.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-threaddump.js
new file mode 100644
index 000..c4e9382
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-threaddump.js
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import RESTAbstractAdapter from './restabstract';
+
+export default RESTAbstractAdapter.extend({
+  address: "rmWebAddress",
+  restNameSpace: "cluster",
+
+  handleResponse(status, headers, payload, requestData) {
+// If the user is not authorized to signal a threaddump for a container,
+// the response contains a RemoteException with a 403 (Forbidden) status
+// code. Extract out the error message from the RemoteException in this
+// case.
+// If the status is '0' or empty, the YARN role is either unavailable,
+// unable to respond, or blocked by a network timeout/firewall issue.
+if (status === 403)  {
+  if (payload
+  && typeof payload === 'object'
+  && payload.RemoteException
+  && payload.RemoteException.message) {
+return new Error(payload.RemoteException.message);
+  }
+} else if (status === 0 && payload === "") {
+  return new Error("Not able to connect to YARN!");
+}
+
+return payload;
+  },
+
+  /**
+   * Set up the POST request to use the signalToContainer REST API
+   * to signal a thread dump for a running container to RM.
+   */
+  signalContainerForThreaddump(request, containerId, user) {
+var url = this.buildURL();
+if (user && containerId) {
+  url += "/containers" + "/" + containerId + "/signal"
++ "/OUTPUT_THREAD_DUMP" + "?user.name=" + user;
+}
+return this.ajax(url, "POST", {data: request});
+  },
+
+  ajax(url, method, hash) {
+hash = {};
+hash.crossDomain = true;
+hash.xhrFields = {withCredentials: true};
+hash.targetServer = "RM";
+return this._super(url, method, hash);
+  },
+
+  /**
+   * Override options so that result is not expected to be JSON
+   */
+  ajaxOptions: function (url, type, options) {
+var hash = options || {};
+hash.url = url;
+hash.type = type;
+// Make sure jQuery does not try to convert response to JSON.
+hash.dataType = 'text';
+hash.context = this;
+
+var headers = Ember.get(this, 'headers');
+if (headers !== undefined) {
+  hash.beforeSend = function (xhr) {
+Object.keys(headers).forEach(function (key) {
+  return xhr.setRequestHeader(key, headers[key]);
+});
+  };
+}
+return hash;
+  }
+});
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-node-container-log.js
 
b/hadoop-yarn-project/hadoop-y

[hadoop] branch branch-3.3 updated: HDFS-15536. RBF: Clear Quota in Router was not consistent.

2020-08-26 Thread hemanthboyina
This is an automated email from the ASF dual-hosted git repository.

hemanthboyina pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new ac0a377  HDFS-15536. RBF: Clear Quota in Router was not consistent.
ac0a377 is described below

commit ac0a3991a9c66720ab80a05bec1543420e69
Author: hemanthboyina 
AuthorDate: Wed Aug 26 13:03:08 2020 +0530

HDFS-15536. RBF: Clear Quota in Router was not consistent.

(cherry picked from commit 6e618b6a7e3b7cb6459091945c8eb07fddc0034e)
---
 .../federation/store/records/MountTable.java   |  5 +++
 ...erRPCMultipleDestinationMountTableResolver.java | 49 ++
 2 files changed, 54 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
index d1351a3..5d7d5c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
@@ -430,6 +430,8 @@ public abstract class MountTable extends BaseRecord {
 .append(this.isReadOnly())
 .append(this.getDestOrder())
 .append(this.isFaultTolerant())
+.append(this.getQuota().getQuota())
+.append(this.getQuota().getSpaceQuota())
 .toHashCode();
   }
 
@@ -443,6 +445,9 @@ public abstract class MountTable extends BaseRecord {
   .append(this.isReadOnly(), other.isReadOnly())
   .append(this.getDestOrder(), other.getDestOrder())
   .append(this.isFaultTolerant(), other.isFaultTolerant())
+  .append(this.getQuota().getQuota(), other.getQuota().getQuota())
+  .append(this.getQuota().getSpaceQuota(),
+  other.getQuota().getSpaceQuota())
   .isEquals();
 }
 return false;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
index d00b93c..bcab7bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -34,6 +35,7 @@ import java.util.Map;
 import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options.Rename;
@@ -56,7 +58,9 @@ import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationReq
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -509,6 +513,43 @@ public class 
TestRouterRPCMultipleDestinationMountTableResolver {
 verifyRenameOnMultiDestDirectories(DestinationOrder.SPACE, true);
   }
 
+  @Test
+  public void testClearQuota() throws Exception {
+long nsQuota = 5;
+long ssQuota = 100;
+Path path = new Path("/router_test");
+nnFs0.mkdirs(path);
+nnFs1.mkdirs(path);
+MountTable addEntry = MountTable.newInstance("/router_test",
+Collections.singletonMap("ns0", "/router_test"));
+addEntry.setQuota(new RouterQuotaUsage.Builder().build());
+assertTrue(addMountTable(addEntry));
+RouterQuotaUpdateService updateService =
+routerContext.getRouter().getQuotaCacheUpdateService();
+updateService.periodicInvoke();
+
+//set quota and validate the quota
+RouterAdmin admin = getRouterAdmin();
+String[] argv = new String[] {"-setQuota", path.toString(), "-nsQuota",
+String.valueOf(nsQuota), "-ssQuota", String.valueOf(ssQuota)};
+assertEquals(0, ToolRunner.run(admin, argv));
+  

[hadoop] branch trunk updated: HDFS-15536. RBF: Clear Quota in Router was not consistent.

2020-08-26 Thread hemanthboyina
This is an automated email from the ASF dual-hosted git repository.

hemanthboyina pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6e618b6  HDFS-15536. RBF: Clear Quota in Router was not consistent.
6e618b6 is described below

commit 6e618b6a7e3b7cb6459091945c8eb07fddc0034e
Author: hemanthboyina 
AuthorDate: Wed Aug 26 13:03:08 2020 +0530

HDFS-15536. RBF: Clear Quota in Router was not consistent.
---
 .../federation/store/records/MountTable.java   |  5 +++
 ...erRPCMultipleDestinationMountTableResolver.java | 49 ++
 2 files changed, 54 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
index 282fe6c..907a405 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
@@ -430,6 +430,8 @@ public abstract class MountTable extends BaseRecord {
 .append(this.isReadOnly())
 .append(this.getDestOrder())
 .append(this.isFaultTolerant())
+.append(this.getQuota().getQuota())
+.append(this.getQuota().getSpaceQuota())
 .toHashCode();
   }
 
@@ -443,6 +445,9 @@ public abstract class MountTable extends BaseRecord {
   .append(this.isReadOnly(), other.isReadOnly())
   .append(this.getDestOrder(), other.getDestOrder())
   .append(this.isFaultTolerant(), other.isFaultTolerant())
+  .append(this.getQuota().getQuota(), other.getQuota().getQuota())
+  .append(this.getQuota().getSpaceQuota(),
+  other.getQuota().getSpaceQuota())
   .isEquals();
 }
 return false;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
index 181442d..6ebc311 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -34,6 +35,7 @@ import java.util.Map;
 import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options.Rename;
@@ -57,7 +59,9 @@ import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationReq
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetDestinationResponse;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -542,6 +546,43 @@ public class 
TestRouterRPCMultipleDestinationMountTableResolver {
 verifyRenameOnMultiDestDirectories(DestinationOrder.SPACE, true);
   }
 
+  @Test
+  public void testClearQuota() throws Exception {
+long nsQuota = 5;
+long ssQuota = 100;
+Path path = new Path("/router_test");
+nnFs0.mkdirs(path);
+nnFs1.mkdirs(path);
+MountTable addEntry = MountTable.newInstance("/router_test",
+Collections.singletonMap("ns0", "/router_test"));
+addEntry.setQuota(new RouterQuotaUsage.Builder().build());
+assertTrue(addMountTable(addEntry));
+RouterQuotaUpdateService updateService =
+routerContext.getRouter().getQuotaCacheUpdateService();
+updateService.periodicInvoke();
+
+//set quota and validate the quota
+RouterAdmin admin = getRouterAdmin();
+String[] argv = new String[] {"-setQuota", path.toString(), "-nsQuota",
+String.valueOf(nsQuota), "-ssQuota", String.valueOf(ssQuota)};
+assertEquals(0, ToolRunner.run(admin, argv));
+updateService.periodicInvoke();
+resolver.loadCache(true);
+ContentSummary cs
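
Mirroring the new test, mount-table quotas can also be driven programmatically through RouterAdmin with ToolRunner; a sketch, assuming a router configuration is on the classpath and that -clrQuota is available alongside -setQuota:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
import org.apache.hadoop.util.ToolRunner;

public final class RouterQuotaSketch {
  public static void main(String[] args) throws Exception {
    RouterAdmin admin = new RouterAdmin(new Configuration());
    // Set a namespace and storage-space quota, exactly as the test does.
    int rc = ToolRunner.run(admin, new String[] {
        "-setQuota", "/router_test", "-nsQuota", "5", "-ssQuota", "100"});
    // Clearing it should leave the mount entry without quota limits.
    if (rc == 0) {
      rc = ToolRunner.run(admin, new String[] {"-clrQuota", "/router_test"});
    }
    System.exit(rc);
  }
}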