[ https://issues.apache.org/jira/browse/HDFS-16008?focusedWorklogId=594652&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-594652 ]
ASF GitHub Bot logged work on HDFS-16008: ----------------------------------------- Author: ASF GitHub Bot Created on: 11/May/21 17:34 Start Date: 11/May/21 17:34 Worklog Time Spent: 10m Work Description: goiri commented on a change in pull request #2981: URL: https://github.com/apache/hadoop/pull/2981#discussion_r630388995 ########## File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java ########## @@ -1036,6 +1054,81 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) return updateResponse.getStatus(); } + /** + * initViewFsToMountTable. + * @param clusterName The specified cluster to initialize. Review comment: Mention the allClusters. ########## File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java ########## @@ -700,6 +707,117 @@ public void testAddMountTableIfParentExist() throws Exception { } } + @Test + public void testInitViewFsToMountTable() throws Exception { + // re-set system out for testing + System.setOut(new PrintStream(out)); + stateStore.loadCache(MountTableStoreImpl.class, true); + String nnAddress = cluster.getRandomNamenode(). + getNamenode().getHostAndPort(); + String baseDir = "/initViewFs"; + String src1 = baseDir + "/data1"; + Path destPath1 = new Path("hdfs://" + nnAddress + src1); + String user1 = "user1"; + String group1 = "group1"; + String clusterName1 = "ClusterX"; + + String src2 = baseDir + "/data2"; + String clusterName2 = "ClusterY"; + + String src3 = baseDir + "/inExistent"; + Path destPath3 = new Path("hdfs://" + nnAddress + src3); + String clusterName3 = "ClusterZ"; + + // 0.mkdir destPath + hdfs.mkdirs(destPath1); + // 1.set owner + hdfs.setOwner(destPath1, user1, group1); + // 2.set viewFs mapping + // Use different clusterName and mount points + admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + + clusterName1 + ".link." 
+ src1, destPath1.toString()); + admin.getConf().set(CONFIG_VIEWFS_PREFIX + "." + + clusterName2 + ".link." + src2, destPath1.toString()); + + // 3.run initialization,Specify a ClusterName + String[] argv = new String[]{"-initViewFsToMountTable", clusterName1}; + assertEquals(0, ToolRunner.run(admin, argv)); + // 4.gets the mount point entries + stateStore.loadCache(MountTableStoreImpl.class, true); + GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest + .newInstance(src1); + GetMountTableEntriesResponse getResponse = client.getMountTableManager() + .getMountTableEntries(getRequest); + List<MountTable> mountTables = getResponse.getEntries(); + // 5.Checking + assertEquals(1, mountTables.size()); + MountTable mountTable = mountTables.get(0); + List<RemoteLocation> destinations = mountTable.getDestinations(); + assertEquals(1, destinations.size()); + assertEquals(user1, mountTable.getOwnerName()); + assertEquals(group1, mountTable.getGroupName()); + assertEquals(destPath1.toUri().getPath(), mountTable. + getDestinations().get(0).getDest()); + assertEquals(nnAddress, mountTable. + getDestinations().get(0).getNameserviceId()); + assertEquals(src1, mountTable.getSourcePath()); + + // Specify allCluster to initialize all mappings + argv = new String[]{"-rm", src1}; + assertEquals(0, ToolRunner.run(admin, argv)); + stateStore.loadCache(MountTableStoreImpl.class, true); + argv = new String[]{"-initViewFsToMountTable", "allClusters"}; + assertEquals(0, ToolRunner.run(admin, argv)); + + stateStore.loadCache(MountTableStoreImpl.class, true); Review comment: This test got massive; can we refactor and split it? ########## File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java ########## @@ -1036,6 +1054,81 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) return updateResponse.getStatus(); } + /** + * initViewFsToMountTable. 
+ * @param clusterName The specified cluster to initialize. + * @return If the quota was updated. + * @throws IOException Error adding the mount point. + */ + public boolean initViewFsToMountTable(String clusterName) + throws IOException { + // fs.viewfs.mounttable.ClusterX.link./data + final String mountTablePrefix; + if (clusterName.equals("allClusters")) { Review comment: Maybe make allClusters a constant. ########## File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java ########## @@ -1036,6 +1054,81 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota) return updateResponse.getStatus(); } + /** + * initViewFsToMountTable. Review comment: Extend this a little. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org Issue Time Tracking ------------------- Worklog Id: (was: 594652) Time Spent: 2h 40m (was: 2.5h) > RBF: Tool to initialize ViewFS Mapping to Router > ------------------------------------------------ > > Key: HDFS-16008 > URL: https://issues.apache.org/jira/browse/HDFS-16008 > Project: Hadoop HDFS > Issue Type: Improvement > Components: rbf > Affects Versions: 3.3.1 > Reporter: zhu > Assignee: zhu > Priority: Major > Labels: pull-request-available > Time Spent: 2h 40m > Remaining Estimate: 0h > > This is a tool for initializing ViewFS Mapping to Router. > Some companies are currently migrating from viewfs to router, I think they > need this tool. -- This message was sent by Atlassian Jira (v8.3.4#803005) --------------------------------------------------------------------- To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org