HDFS-11432. Federation : Support fully qualified path for Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcd03df9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcd03df9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcd03df9

Branch: refs/heads/YARN-5972
Commit: dcd03df9f9e0080d7e179060ffc8148336c31b3e
Parents: 989bd56
Author: Brahma Reddy Battula <bra...@apache.org>
Authored: Wed Mar 1 10:45:56 2017 +0530
Committer: Brahma Reddy Battula <bra...@apache.org>
Committed: Wed Mar 1 10:45:56 2017 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/shell/Command.java     | 14 ++++-
 .../apache/hadoop/hdfs/tools/CacheAdmin.java    |  8 +--
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   | 18 +++----
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 57 +++++++++++---------
 .../hdfs/tools/snapshot/SnapshotDiff.java       | 19 ++++++-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 26 +++++++++
 .../hadoop/hdfs/TestSnapshotCommands.java       | 34 ++++++++++++
 7 files changed, 136 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
index c573aa0..4c5cbad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
@@ -101,7 +101,17 @@ abstract public class Command extends Configured {
    * @throws IOException if any error occurs
    */
   abstract protected void run(Path path) throws IOException;
-  
+
+  /**
+   * Execute the command on the input path data. Commands can override to make
+   * use of the resolved filesystem.
+   * @param pathData The input path with resolved filesystem
+   * @throws IOException
+   */
+  protected void run(PathData pathData) throws IOException {
+    run(pathData.path);
+  }
+
   /** 
    * For each source path, execute the command
    * 
@@ -113,7 +123,7 @@ abstract public class Command extends Configured {
       try {
         PathData[] srcs = PathData.expandAsGlob(src, getConf());
         for (PathData s : srcs) {
-          run(s.path);
+          run(s);
         }
       } catch (IOException e) {
         exitCode = -1;

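The run(PathData) overload above gives subclasses access to the filesystem that was resolved while expanding each argument, so a fully qualified path executes against its own cluster rather than fs.defaultFS. A minimal sketch of a subclass using the hook (PrintFsCommand and its behavior are hypothetical, not part of this patch):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.shell.Command;
    import org.apache.hadoop.fs.shell.PathData;

    // Hypothetical command that reports which filesystem each path resolved to.
    class PrintFsCommand extends Command {
      PrintFsCommand(Configuration conf) {
        super(conf);
      }

      @Override
      protected void run(PathData pathData) throws IOException {
        // pathData.fs was resolved from the path's URI, which may name a
        // different cluster than fs.defaultFS.
        System.out.println(pathData.path + " -> " + pathData.fs.getUri());
      }

      @Override
      protected void run(Path path) throws IOException {
        // Not used; all work happens in run(PathData).
      }
    }
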
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
index 522f701..d8cbfc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
@@ -189,8 +189,9 @@ public class CacheAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-        
-      DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+
+      DistributedFileSystem dfs =
+          AdminHelper.getDFS(new Path(path).toUri(), conf);
       CacheDirectiveInfo directive = builder.build();
       EnumSet<CacheFlag> flags = EnumSet.noneOf(CacheFlag.class);
       if (force) {
@@ -409,7 +410,8 @@ public class CacheAdmin extends Configured implements Tool {
       }
       int exitCode = 0;
       try {
-        DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+        DistributedFileSystem dfs =
+            AdminHelper.getDFS(new Path(path).toUri(), conf);
         RemoteIterator<CacheDirectiveEntry> iter =
             dfs.listCacheDirectives(
                 new CacheDirectiveInfo.Builder().

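With this change cacheadmin resolves the DistributedFileSystem from the -path argument itself. A hedged usage sketch (the hdfs://ns1 authority and pool1 are hypothetical; the cache pool must already exist on that cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.CacheAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class CacheAdminExample {
      public static void main(String[] args) throws Exception {
        // The fully qualified -path decides which namenode receives the
        // directive, independent of fs.defaultFS.
        int exit = ToolRunner.run(new CacheAdmin(new Configuration()),
            new String[] {"-addDirective", "-path", "hdfs://ns1/cached",
                "-pool", "pool1"});
        System.exit(exit);
      }
    }
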
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
index 225f11a..4c7335f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
@@ -139,12 +139,12 @@ public class CryptoAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-
-      HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+      Path p = new Path(path);
+      HdfsAdmin admin = new HdfsAdmin(p.toUri(), conf);
       EnumSet<CreateEncryptionZoneFlag> flags =
           EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH);
       try {
-        admin.createEncryptionZone(new Path(path), keyName, flags);
+        admin.createEncryptionZone(p, keyName, flags);
         System.out.println("Added encryption zone " + path);
       } catch (IOException e) {
         System.err.println(prettifyException(e));
@@ -226,12 +226,12 @@ public class CryptoAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-
+      Path p = new Path(path);
       final HdfsAdmin admin =
-          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+          new HdfsAdmin(p.toUri(), conf);
       try {
         final FileEncryptionInfo fei =
-            admin.getFileEncryptionInfo(new Path(path));
+            admin.getFileEncryptionInfo(p);
         if (fei == null) {
           System.out.println("No FileEncryptionInfo found for path " + path);
           return 2;
@@ -273,10 +273,10 @@ public class CryptoAdmin extends Configured implements Tool {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
       }
-
-      HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+      Path p = new Path(path);
+      HdfsAdmin admin = new HdfsAdmin(p.toUri(), conf);
       try {
-        admin.provisionEncryptionZoneTrash(new Path(path));
+        admin.provisionEncryptionZoneTrash(p);
         System.out.println("Created a trash directory for " + path);
       } catch (IOException ioe) {
         System.err.println(prettifyException(ioe));

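CryptoAdmin now builds its HdfsAdmin against the URI of the supplied path, so each subcommand talks to the cluster named in the path. A minimal API-level sketch of the same pattern (hdfs://ns1/zone and key1 are hypothetical; the key must exist in the configured KMS):

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class CreateZoneExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path zone = new Path("hdfs://ns1/zone");
        // HdfsAdmin is bound to the cluster named by the path's URI,
        // not to fs.defaultFS.
        HdfsAdmin admin = new HdfsAdmin(zone.toUri(), conf);
        admin.createEncryptionZone(zone, "key1",
            EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
      }
    }
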
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 4dd9d7e..c1f79e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.fs.shell.PathData;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
@@ -109,15 +110,21 @@ public class DFSAdmin extends FsShell {
    * An abstract class for the execution of a file system command
    */
   abstract private static class DFSAdminCommand extends Command {
-    final DistributedFileSystem dfs;
+    protected DistributedFileSystem dfs;
     /** Constructor */
-    public DFSAdminCommand(FileSystem fs) {
-      super(fs.getConf());
+    public DFSAdminCommand(Configuration conf) {
+      super(conf);
+    }
+
+    @Override
+    public void run(PathData pathData) throws IOException {
+      FileSystem fs = pathData.fs;
       if (!(fs instanceof DistributedFileSystem)) {
-        throw new IllegalArgumentException("FileSystem " + fs.getUri() + 
-            " is not an HDFS file system");
+        throw new IllegalArgumentException("FileSystem " + fs.getUri()
+            + " is not an HDFS file system");
       }
-      this.dfs = (DistributedFileSystem)fs;
+      this.dfs = (DistributedFileSystem) fs;
+      run(pathData.path);
     }
   }
   
@@ -133,8 +140,8 @@ public class DFSAdmin extends FsShell {
     "\t\tIt does not fault if the directory has no quota.";
     
     /** Constructor */
-    ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    ClearQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
@@ -179,8 +186,8 @@ public class DFSAdmin extends FsShell {
     private final long quota; // the quota to be set
 
     /** Constructor */
-    SetQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    SetQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.quota = Long.parseLong(parameters.remove(0));
@@ -230,8 +237,8 @@ public class DFSAdmin extends FsShell {
     private StorageType type;
 
     /** Constructor */
-    ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    ClearSpaceQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       c.addOptionWithValue("storageType");
       List<String> parameters = c.parse(args, pos);
@@ -294,8 +301,8 @@ public class DFSAdmin extends FsShell {
     private StorageType type;
     
     /** Constructor */
-    SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
-      super(fs);
+    SetSpaceQuotaCommand(String[] args, int pos, Configuration conf) {
+      super(conf);
       CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       String str = parameters.remove(0).trim();
@@ -705,10 +712,11 @@ public class DFSAdmin extends FsShell {
    * @param argv List of of command line parameters.
    * @exception IOException
    */
-  public void allowSnapshot(String[] argv) throws IOException {   
-    DistributedFileSystem dfs = getDFS();
+  public void allowSnapshot(String[] argv) throws IOException {
+    Path p = new Path(argv[1]);
+    final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
     try {
-      dfs.allowSnapshot(new Path(argv[1]));
+      dfs.allowSnapshot(p);
     } catch (SnapshotException e) {
       throw new RemoteException(e.getClass().getName(), e.getMessage());
     }
@@ -721,10 +729,11 @@ public class DFSAdmin extends FsShell {
    * @param argv List of of command line parameters.
    * @exception IOException
    */
-  public void disallowSnapshot(String[] argv) throws IOException {  
-    DistributedFileSystem dfs = getDFS();
+  public void disallowSnapshot(String[] argv) throws IOException {
+    Path p = new Path(argv[1]);
+    final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
     try {
-      dfs.disallowSnapshot(new Path(argv[1]));
+      dfs.disallowSnapshot(p);
     } catch (SnapshotException e) {
       throw new RemoteException(e.getClass().getName(), e.getMessage());
     }
@@ -2042,13 +2051,13 @@ public class DFSAdmin extends FsShell {
       } else if ("-metasave".equals(cmd)) {
         exitCode = metaSave(argv, i);
       } else if (ClearQuotaCommand.matches(cmd)) {
-        exitCode = new ClearQuotaCommand(argv, i, getDFS()).runAll();
+        exitCode = new ClearQuotaCommand(argv, i, getConf()).runAll();
       } else if (SetQuotaCommand.matches(cmd)) {
-        exitCode = new SetQuotaCommand(argv, i, getDFS()).runAll();
+        exitCode = new SetQuotaCommand(argv, i, getConf()).runAll();
       } else if (ClearSpaceQuotaCommand.matches(cmd)) {
-        exitCode = new ClearSpaceQuotaCommand(argv, i, getDFS()).runAll();
+        exitCode = new ClearSpaceQuotaCommand(argv, i, getConf()).runAll();
       } else if (SetSpaceQuotaCommand.matches(cmd)) {
-        exitCode = new SetSpaceQuotaCommand(argv, i, getDFS()).runAll();
+        exitCode = new SetSpaceQuotaCommand(argv, i, getConf()).runAll();
       } else if ("-refreshServiceAcl".equals(cmd)) {
         exitCode = refreshServiceAcl();
       } else if ("-refreshUserToGroupsMappings".equals(cmd)) {

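The quota commands now take a Configuration and bind their DistributedFileSystem per path through the run(PathData) hook, so one invocation can target any cluster. A sketch of the effect (hdfs://ns1/dir and hdfs://other-ns are hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class QuotaExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // fs.defaultFS may point at another cluster; the fully qualified
        // argument decides which namenode the quota call reaches.
        conf.set("fs.defaultFS", "hdfs://other-ns");
        int exit = ToolRunner.run(new DFSAdmin(conf),
            new String[] {"-setQuota", "1000", "hdfs://ns1/dir"});
        System.exit(exit);
      }
    }
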
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
index b87d69f..3838ca1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.hdfs.tools.snapshot;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.util.Tool;
@@ -41,6 +43,19 @@ import org.apache.hadoop.util.ToolRunner;
  */
 @InterfaceAudience.Private
 public class SnapshotDiff extends Configured implements Tool {
+  /**
+   * Construct a SnapshotDiff object.
+   */
+  public SnapshotDiff() {
+    this(new HdfsConfiguration());
+  }
+
+  /**
+   * Construct a SnapshotDiff object.
+   */
+  public SnapshotDiff(Configuration conf) {
+    super(conf);
+  }
   private static String getSnapshotName(String name) {
     if (Path.CUR_DIR.equals(name)) { // current directory
       return "";
@@ -72,8 +87,8 @@ public class SnapshotDiff extends Configured implements Tool {
       System.err.println("Usage: \n" + description);
       return 1;
     }
-    
-    FileSystem fs = FileSystem.get(getConf());
+
+    FileSystem fs = FileSystem.get(new Path(argv[0]).toUri(), getConf());
     if (! (fs instanceof DistributedFileSystem)) {
       System.err.println(
           "SnapshotDiff can only be used in DistributedFileSystem");

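SnapshotDiff now picks its FileSystem from the first argument's URI and, with the new constructors, can be driven programmatically. A hedged sketch (hdfs://ns1/dir and snapshots sn1, sn2 are hypothetical and must exist):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
    import org.apache.hadoop.util.ToolRunner;

    public class SnapshotDiffExample {
      public static void main(String[] args) throws Exception {
        // The snapshottable directory's URI selects the cluster to query.
        int exit = ToolRunner.run(new SnapshotDiff(new HdfsConfiguration()),
            new String[] {"hdfs://ns1/dir", "sn1", "sn2"});
        System.exit(exit);
      }
    }
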
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index d58dd9e..4bfb62c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -1246,6 +1246,32 @@ public class TestQuota {
         -1);
   }
 
+  /**
+   * Test to all the commands by passing the fully qualified path.
+   */
+  @Test(timeout = 30000)
+  public void testQuotaCommandsWithURI() throws Exception {
+    DFSAdmin dfsAdmin = new DFSAdmin(conf);
+    final Path dir = new Path("/" + this.getClass().getSimpleName(),
+        GenericTestUtils.getMethodName());
+    assertTrue(dfs.mkdirs(dir));
+
+    /* set space quota */
+    testSetAndClearSpaceQuotaRegularInternal(
+        new String[] { "-setSpaceQuota", "1024",
+            dfs.getUri() + "/" + dir.toString() }, dir, 0, 1024);
+
+    /* clear space quota */
+    testSetAndClearSpaceQuotaRegularInternal(
+        new String[] { "-clrSpaceQuota", dfs.getUri() + "/" + dir.toString() },
+        dir, 0, -1);
+    runCommand(dfsAdmin, false, "-setQuota", "1000",
+        dfs.getUri() + "/" + dir.toString());
+
+    runCommand(dfsAdmin, false, "-clrQuota",
+        dfs.getUri() + "/" + dir.toString());
+  }
+
   private void testSetAndClearSpaceQuotaRegularInternal(
       final String[] args,
       final Path dir,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
index eec4e99..1d5e071 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
@@ -23,6 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -60,6 +62,7 @@ public class TestSnapshotCommands {
   @Before
   public void setUp() throws IOException {
     fs.mkdirs(new Path("/sub1"));
+    fs.mkdirs(new Path("/Fully/QPath"));
     fs.allowSnapshot(new Path("/sub1"));
     fs.mkdirs(new Path("/sub1/sub1sub1"));
     fs.mkdirs(new Path("/sub1/sub1sub2"));
@@ -161,4 +164,35 @@ public class TestSnapshotCommands {
     // now it can be deleted
     DFSTestUtil.FsShellRun("-rmr /sub1", conf);
   }
+
+  @Test (timeout=60000)
+  public void testSnapshotCommandsWithURI()throws Exception {
+    Configuration config = new HdfsConfiguration();
+    //fs.defaultFS should not be used, when path is fully qualified.
+    config.set("fs.defaultFS", "hdfs://127.0.0.1:1024");
+    String path = fs.getUri() + "/Fully/QPath";
+    DFSTestUtil.DFSAdminRun("-allowSnapshot " + path, 0,
+        "Allowing snaphot on " + path + " succeeded", config);
+    DFSTestUtil.FsShellRun("-createSnapshot " + path + " sn1", config);
+    // create file1
+    DFSTestUtil
+        .createFile(fs, new Path(fs.getUri() + "/Fully/QPath/File1"), 1024,
+            (short) 1, 100);
+    // create file2
+    DFSTestUtil
+        .createFile(fs, new Path(fs.getUri() + "/Fully/QPath/File2"), 1024,
+            (short) 1, 100);
+    DFSTestUtil.FsShellRun("-createSnapshot " + path + " sn2", config);
+    // verify the snapshotdiff using api and command line
+    SnapshotDiffReport report =
+        fs.getSnapshotDiffReport(new Path(path), "sn1", "sn2");
+    DFSTestUtil.toolRun(new SnapshotDiff(config), path + " sn1 sn2", 0,
+        report.toString());
+    DFSTestUtil.FsShellRun("-renameSnapshot " + path + " sn2 sn3", config);
+    DFSTestUtil.FsShellRun("-deleteSnapshot " + path + " sn1", config);
+    DFSTestUtil.FsShellRun("-deleteSnapshot " + path + " sn3", config);
+    DFSTestUtil.DFSAdminRun("-disallowSnapshot " + path, 0,
+        "Disallowing snaphot on " + path + " succeeded", config);
+    fs.delete(new Path("/Fully/QPath"), true);
+  }
 }

