Author: wang
Date: Thu Jul 24 23:44:26 2014
New Revision: 1613331

URL: http://svn.apache.org/r1613331
Log:
HDFS-6696. Name node cannot start if the path of a file under construction contains .snapshot. (wang)
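[Note] The core of the change, visible in the FSImageFormat.java hunks below, is to rename path components that collide with names HDFS now reserves (".snapshot", ".reserved") while an old fsimage is loaded during upgrade, including the paths of files still under construction. The following is a minimal, standalone Java sketch of that idea only; the class name, the hard-coded rename pair, and the helper are illustrative assumptions, not the actual FSImageFormat code, which operates on byte[] path components and reads its rename pairs from setRenameReservedPairs(...).

// Illustrative sketch only -- not the FSImageFormat implementation.
public class ReservedRenameSketch {
  // Assumed single hard-coded rename pair for demonstration purposes.
  private static final String RESERVED = ".snapshot";
  private static final String REPLACEMENT = ".user-snapshot";

  /** Replace any now-reserved components of a pre-upgrade path. */
  static String renameReserved(String path) {
    String[] components = path.split("/");
    for (int i = 0; i < components.length; i++) {
      if (RESERVED.equals(components[i])) {
        components[i] = REPLACEMENT;
      }
    }
    return String.join("/", components);
  }

  public static void main(String[] args) {
    // A file under construction at a path like this used to abort NameNode
    // startup; after the rename the namespace loads cleanly.
    System.out.println(renameReserved("/dir1/.snapshot/open"));
    // prints: /dir1/.user-snapshot/open
  }
}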
Added:
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
      - copied unchanged from r1613329, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
      - copied unchanged from r1613329, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
Modified:
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/   (props changed)
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

Propchange: hadoop/common/branches/branch-2.5/hadoop-hdfs-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project:r1613329

Propchange: hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1613329

Modified: hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1613331&r1=1613330&r2=1613331&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jul 24 23:44:26 2014
@@ -543,6 +543,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6422. getfattr in CLI doesn't throw exception or return non-0
     return code when xattr doesn't exist. (Charles Lamb via umamahesh)
 
+    HDFS-6696. Name node cannot start if the path of a file under
+    construction contains ".snapshot". (wang)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation.
     (Yi Liu via umamahesh)

Propchange: hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1613329

Modified: hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1613331&r1=1613330&r2=1613331&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Thu Jul 24 23:44:26 2014
@@ -620,6 +620,16 @@ public class FSImageFormat {
       INodeDirectory parentINode = fsDir.rootDir;
       for (long i = 0; i < numFiles; i++) {
         pathComponents = FSImageSerialization.readPathComponents(in);
+        for (int j=0; j < pathComponents.length; j++) {
+          byte[] newComponent = renameReservedComponentOnUpgrade
+              (pathComponents[j], getLayoutVersion());
+          if (!Arrays.equals(newComponent, pathComponents[j])) {
+            String oldPath = DFSUtil.byteArray2PathString(pathComponents);
+            pathComponents[j] = newComponent;
+            String newPath = DFSUtil.byteArray2PathString(pathComponents);
+            LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
+          }
+        }
         final INode newNode = loadINode(
             pathComponents[pathComponents.length-1], false, in, counter);
 
@@ -933,6 +943,7 @@ public class FSImageFormat {
           oldnode = namesystem.dir.getInode(cons.getId()).asFile();
           inSnapshot = true;
         } else {
+          path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
           final INodesInPath iip = fsDir.getLastINodeInPath(path);
           oldnode = INodeFile.valueOf(iip.getINode(0), path);
         }

Modified: hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1613331&r1=1613330&r2=1613331&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/branch-2.5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Thu Jul 24 23:44:26 2014
@@ -70,6 +70,9 @@ public class TestDFSUpgradeFromImage {
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
   private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+  private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz";
+  private static final String HADOOP023_RESERVED_IMAGE =
+      "hadoop-0.23-reserved.tgz";
   private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
 
   private static class ReferenceFileInfo {
@@ -326,6 +329,140 @@ public class TestDFSUpgradeFromImage {
   }
 
   /**
+   * Test upgrade from a branch-1.2 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel1ReservedImage() throws Exception {
+    unpackStorage(HADOOP1_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+          ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.my-reserved",
+          "/.user-snapshot",
+          "/.user-snapshot/.user-snapshot",
+          "/.user-snapshot/open",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot",
+          "/user",
+          "/user/andrew",
+          "/user/andrew/.user-snapshot",
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test upgrade from a 0.23.11 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel023ReservedImage() throws Exception {
+    unpackStorage(HADOOP023_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+          ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.user-snapshot",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot"
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
    * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
    * paths to test renaming on upgrade
    */
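[Note] Both new tests repeat the same breadth-first traverse-and-verify loop over the upgraded namespace. A hypothetical helper, not part of this patch (the class and method names are invented for illustration), that factors out that shared logic could look like the following sketch.

// Hypothetical test helper, not part of this commit: walk the upgraded
// namespace breadth-first and assert it contains exactly the expected paths.
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

class UpgradePathChecker {
  static void assertPathsRenamed(FileSystem fs, String[] expected)
      throws Exception {
    // Queue of directories still to list, seeded with the root.
    List<Path> toList = new ArrayList<Path>();
    toList.add(new Path("/"));
    List<String> found = new ArrayList<String>();
    while (!toList.isEmpty()) {
      Path p = toList.remove(0);
      for (FileStatus status : fs.listStatus(p)) {
        String path = status.getPath().toUri().getPath();
        found.add(path);
        if (status.isDirectory()) {
          toList.add(status.getPath());
        }
      }
    }
    // Every expected path must be present, and nothing else.
    for (String s : expected) {
      assertTrue("Did not find expected path " + s, found.contains(s));
    }
    assertEquals("Found an unexpected path while listing filesystem",
        expected.length, found.size());
  }
}

Each test would then call assertPathsRenamed(dfs, expected) twice, once before and once after finalizing and restarting the NameNode, exactly as the inline loops in the committed tests do.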