HDFS-11968. ViewFS: StoragePolicies commands fail with HDFS federation. Contributed by Mukul Kumar Singh.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9130511
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9130511
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9130511

Branch: refs/heads/HDFS-10467
Commit: b91305119b434d23b99ae7e755aea6639f48b6ab
Parents: 4d5dd75
Author: Arpit Agarwal <a...@apache.org>
Authored: Tue Oct 3 11:23:40 2017 -0700
Committer: Arpit Agarwal <a...@apache.org>
Committed: Tue Oct 3 11:23:40 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/viewfs/InodeTree.java  |  3 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 61 +++++++++------
 .../hdfs/tools/TestStoragePolicyCommands.java   | 11 +--
 .../tools/TestViewFSStoragePolicyCommands.java  | 80 ++++++++++++++++++++
 .../tools/TestWebHDFSStoragePolicyCommands.java | 42 ++++++++++
 5 files changed, 168 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9130511/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
index 665c9c9..7274acd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
@@ -407,7 +407,8 @@ abstract class InodeTree<T> {
       for (int j = 1; j <= i; ++j) {
         failedAt.append('/').append(path[j]);
       }
-      throw (new FileNotFoundException(failedAt.toString()));
+      throw (new FileNotFoundException("File/Directory does not exist: " +
+          failedAt.toString()));
     }
 
     if (nextInode instanceof INodeLink) {
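Context for reviewers: the InodeTree change only makes the ViewFS resolution
failure self-describing. Assuming a mount table with no entry covering
/unmounted (a hypothetical path), the reported error changes roughly from

    java.io.FileNotFoundException: /unmounted

to

    java.io.FileNotFoundException: File/Directory does not exist: /unmounted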
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9130511/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 9c7d048..d5e5b4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs.tools;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -29,6 +31,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -100,11 +103,12 @@ public class StoragePolicyAdmin extends Configured implements Tool {
 
     @Override
     public int run(Configuration conf, List<String> args) throws IOException {
-      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
+        Collection<? extends BlockStoragePolicySpi> policies =
+            fs.getAllStoragePolicies();
         System.out.println("Block Storage Policies:");
-        for (BlockStoragePolicy policy : policies) {
+        for (BlockStoragePolicySpi policy : policies) {
           if (policy != null) {
             System.out.println("\t" + policy);
           }
@@ -149,32 +153,43 @@ public class StoragePolicyAdmin extends Configured implements Tool {
       }
 
       Path p = new Path(path);
-      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        HdfsFileStatus status = dfs.getClient().getFileInfo(
-            Path.getPathWithoutSchemeAndAuthority(p).toString());
-        if (status == null) {
+        FileStatus status;
+        try {
+          status = fs.getFileStatus(p);
+        } catch (FileNotFoundException e) {
           System.err.println("File/Directory does not exist: " + path);
           return 2;
         }
-        byte storagePolicyId = status.getStoragePolicy();
-        if (storagePolicyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
-          System.out.println("The storage policy of " + path + " is unspecified");
-          return 0;
-        }
-        Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
-        for (BlockStoragePolicy policy : policies) {
-          if (policy.getId() == storagePolicyId) {
-            System.out.println("The storage policy of " + path + ":\n" + policy);
+
+        if (status instanceof HdfsFileStatus) {
+          byte storagePolicyId = ((HdfsFileStatus)status).getStoragePolicy();
+          if (storagePolicyId ==
+              HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
+            System.out.println("The storage policy of " + path +
+                " is unspecified");
             return 0;
           }
+          Collection<? extends BlockStoragePolicySpi> policies =
+              fs.getAllStoragePolicies();
+          for (BlockStoragePolicySpi policy : policies) {
+            if (policy instanceof BlockStoragePolicy) {
+              if (((BlockStoragePolicy)policy).getId() == storagePolicyId) {
+                System.out.println("The storage policy of " + path +
+                    ":\n" + policy);
+                return 0;
+              }
+            }
+          }
         }
+        System.err.println(getName() + " is not supported for filesystem " +
+            fs.getScheme() + " on path " + path);
+        return 2;
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 2;
       }
-      System.err.println("Cannot identify the storage policy for " + path);
-      return 2;
     }
   }
@@ -218,9 +233,9 @@ public class StoragePolicyAdmin extends Configured implements Tool {
         return 1;
       }
       Path p = new Path(path);
-      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        dfs.setStoragePolicy(p, policyName);
+        fs.setStoragePolicy(p, policyName);
         System.out.println("Set storage policy " + policyName + " on " + path);
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
@@ -264,9 +279,9 @@ public class StoragePolicyAdmin extends Configured implements Tool {
       }
 
       Path p = new Path(path);
-      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      final FileSystem fs = FileSystem.get(conf);
       try {
-        dfs.unsetStoragePolicy(p);
+        fs.unsetStoragePolicy(p);
         System.out.println("Unset storage policy from " + path);
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
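Note on the rework above: every command now obtains its filesystem from
FileSystem.get(conf) instead of assuming a DistributedFileSystem, which is
what lets the `hdfs storagepolicies` commands run against viewfs:// and
webhdfs:// URIs. A minimal standalone sketch of the same pattern (the class
name and main() are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockStoragePolicySpi;
    import org.apache.hadoop.fs.FileSystem;

    public class ListPoliciesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolves whatever fs.defaultFS names: hdfs, viewfs, webhdfs, ...
        FileSystem fs = FileSystem.get(conf);
        for (BlockStoragePolicySpi policy : fs.getAllStoragePolicies()) {
          System.out.println(policy);
        }
      }
    }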
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9130511/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index 149dabb..f31c739 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -18,11 +18,12 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.IOException;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -38,12 +39,12 @@ public class TestStoragePolicyCommands {
   private static final short REPL = 1;
   private static final int SIZE = 128;
 
-  private static Configuration conf;
-  private static MiniDFSCluster cluster;
-  private static DistributedFileSystem fs;
+  protected static Configuration conf;
+  protected static MiniDFSCluster cluster;
+  protected static FileSystem fs;
 
   @Before
-  public void clusterSetUp() throws IOException {
+  public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
     cluster.waitActive();
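Aside: the fields above become protected (and fs becomes a plain FileSystem)
so that the two new test classes below can reuse every test case against
other FileSystem implementations by overriding clusterSetUp(). In-process,
the tool is driven the same way the patch's tests drive it, roughly as
follows (argument values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.StoragePolicyAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class AdminToolSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
        // Exit code 0 on success; 2 on error (see run() above).
        int exitCode = ToolRunner.run(admin, new String[] {
            "-setStoragePolicy", "-path", "/foo", "-policy", "COLD"});
        System.exit(exitCode);
      }
    }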
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9130511/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
new file mode 100644
index 0000000..b3bb3c4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FsConstants;
+
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+/**
+ * Test StoragePolicyAdmin commands with ViewFileSystem.
+ */
+public class TestViewFSStoragePolicyCommands extends TestStoragePolicyCommands {
+
+  @Before
+  public void clusterSetUp() throws IOException {
+    conf = new HdfsConfiguration();
+    String clusterName = "cluster";
+    cluster =
+        new MiniDFSCluster.Builder(conf).nnTopology(
+            MiniDFSNNTopology.simpleFederatedTopology(2))
+            .numDataNodes(2)
+            .build();
+    cluster.waitActive();
+    DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
+    DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
+
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_SCHEME +"://" + clusterName);
+
+    Path base1 = new Path("/user1");
+    Path base2 = new Path("/user2");
+    hdfs1.delete(base1, true);
+    hdfs2.delete(base2, true);
+    hdfs1.mkdirs(base1);
+    hdfs2.mkdirs(base2);
+    ConfigUtil.addLink(conf, clusterName, "/foo",
+        hdfs1.makeQualified(base1).toUri());
+    ConfigUtil.addLink(conf, clusterName, "/hdfs2",
+        hdfs2.makeQualified(base2).toUri());
+    fs = FileSystem.get(conf);
+  }
+
+  /**
+   * Storage policy operation on the viewfs root should fail.
+ */ + @Test + public void testStoragePolicyRoot() throws Exception { + final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf); + DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /", 2, + "is not supported for filesystem viewfs on path /"); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9130511/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java new file mode 100644 index 0000000..f10205c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; +import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; +import org.junit.Before; + +import java.io.IOException; +import java.net.URISyntaxException; + +/** + * Test StoragePolicyAdmin commands with WebHDFS. + */ +public class TestWebHDFSStoragePolicyCommands + extends TestStoragePolicyCommands { + + @Before + public void clusterSetUp() throws IOException, URISyntaxException { + super.clusterSetUp(); + fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, + WebHdfsConstants.WEBHDFS_SCHEME); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, + fs.getUri().toString()); + } +} --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org