Author: szetszwo
Date: Fri Feb 21 20:08:08 2014
New Revision: 1570694

URL: http://svn.apache.org/r1570694
Log:
Merge r1569890 through r1570692 from trunk.
Modified:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1570392-1570692

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Feb 21 20:08:08 2014
@@ -424,6 +424,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5775. Consolidate the code for serialization in CacheManager
     (Haohui Mai via brandonli)
 
+    HDFS-5935. New Namenode UI FS browser should throw smarter error messages.
+    (Travis Thompson via jing9)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -543,6 +546,11 @@ Release 2.4.0 - UNRELEASED
     HDFS-5944. LeaseManager:findLeaseWithPrefixPath can't handle path like /a/b/
     and cause SecondaryNameNode failed do checkpoint (Yunjiong Zhao via
     brandonli)
 
+    HDFS-5982. Need to update snapshot manager when applying editlog for deleting
+    a snapshottable directory. (jing9)
+
+    HDFS-5988. Bad fsimage always generated after upgrade. (wang)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
@@ -600,6 +608,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5959. Fix typo at section name in FSImageFormatProtobuf.java.
     (Akira Ajisaka via suresh)
 
+    HDFS-5981. PBImageXmlWriter generates malformed XML.
+    (Haohui Mai via cnauroth)
+
 Release 2.3.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1570392-1570692

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Feb 21 20:08:08 2014
@@ -1317,20 +1317,12 @@ public class FSDirectory implements Clos
       if (!deleteAllowed(inodesInPath, src) ) {
         filesRemoved = -1;
       } else {
-        // Before removing the node, first check if the targetNode is for a
-        // snapshottable dir with snapshots, or its descendants have
-        // snapshottable dir with snapshots
-        final INode targetNode = inodesInPath.getLastINode();
         List<INodeDirectorySnapshottable> snapshottableDirs = 
             new ArrayList<INodeDirectorySnapshottable>();
-        checkSnapshot(targetNode, snapshottableDirs);
+        checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
         filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
             removedINodes, now);
-        if (snapshottableDirs.size() > 0) {
-          // There are some snapshottable directories without snapshots to be
-          // deleted. Need to update the SnapshotManager.
-          namesystem.removeSnapshottableDirs(snapshottableDirs);
-        }
+        namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
     } finally {
       writeUnlock();
@@ -1392,18 +1384,25 @@ public class FSDirectory implements Clos
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    * @throws SnapshotAccessControlException if path is in RO snapshot
-   */ 
+   */
   void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
-      QuotaExceededException, SnapshotAccessControlException {
+      QuotaExceededException, SnapshotAccessControlException, IOException {
     assert hasWriteLock();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<INode>();
 
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);
-    final long filesRemoved = deleteAllowed(inodesInPath, src) ?
-        unprotectedDelete(inodesInPath, collectedBlocks,
-            removedINodes, mtime) : -1;
+    long filesRemoved = -1;
+    if (deleteAllowed(inodesInPath, src)) {
+      List<INodeDirectorySnapshottable> snapshottableDirs = 
+          new ArrayList<INodeDirectorySnapshottable>();
+      checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
+      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
+          removedINodes, mtime);
+      namesystem.removeSnapshottableDirs(snapshottableDirs);
+    }
+
     if (filesRemoved >= 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
           removedINodes);
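The FSDirectory change above is the heart of HDFS-5982: checkSnapshot() now runs on the editlog-replay path (unprotectedDelete) as well as the RPC path, and removeSnapshottableDirs() is called unconditionally, which is safe because removing an empty list is a no-op. A minimal self-contained sketch of that pattern, using hypothetical stand-in types rather than the real FSDirectory API:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: "registry" stands in for SnapshotManager's dir list.
    class DeleteSketch {
      // Stand-in for checkSnapshot(): would throw if a descendant still has
      // snapshots, otherwise collects snapshottable dirs to unregister.
      static List<String> collectSnapshottable(String target) {
        return new ArrayList<String>();
      }

      static void delete(String target, List<String> registry) {
        List<String> toUnregister = collectSnapshottable(target);
        // ... remove the subtree from the namespace here ...
        // No size() > 0 guard needed: removeAll on an empty list is a no-op,
        // and running this unconditionally keeps the registry consistent on
        // both the RPC path and editlog replay.
        registry.removeAll(toUnregister);
      }
    }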
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Fri Feb 21 20:08:08 2014
@@ -700,9 +700,7 @@ public class FSImageFormat {
       localName =
           renameReservedComponentOnUpgrade(localName, getLayoutVersion());
       INode inode = loadINode(localName, isSnapshotINode, in, counter);
-      if (updateINodeMap
-          && NameNodeLayoutVersion.supports(
-              LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
+      if (updateINodeMap) {
         namesystem.dir.addToInodeMap(inode);
       }
       return inode;

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java Fri Feb 21 20:08:08 2014
@@ -28,6 +28,8 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -63,6 +65,9 @@ import com.google.common.io.LimitInputSt
  * output of the lsr command.
  */
 final class LsrPBImage {
+
+  private static final Log LOG = LogFactory.getLog(LsrPBImage.class);
+
   private final Configuration conf;
   private final PrintWriter out;
   private String[] stringTable;
@@ -133,6 +138,10 @@ final class LsrPBImage {
 
   private void list(String parent, long dirId) {
     INode inode = inodes.get(dirId);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Listing directory id " + dirId + " parent '" + parent +
+          "' (INode is " + inode + ")");
+    }
     listINode(parent.isEmpty() ? "/" : parent, inode);
     long[] children = dirmap.get(dirId);
     if (children == null) {
@@ -189,6 +198,9 @@ final class LsrPBImage {
   }
 
   private void loadINodeDirectorySection(InputStream in) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading directory section");
+    }
     while (true) {
       INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
           .parseDelimitedFrom(in);
@@ -205,10 +217,21 @@ final class LsrPBImage {
         l[i] = refList.get(refId).getReferredId();
       }
       dirmap.put(e.getParent(), l);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Loaded directory (parent " + e.getParent() +
+            ") with " + e.getChildrenCount() + " children and " +
+            e.getRefChildrenCount() + " reference children");
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loaded " + dirmap.size() + " directories");
     }
   }
 
   private void loadINodeReferenceSection(InputStream in) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading inode reference section");
+    }
     while (true) {
       INodeReferenceSection.INodeReference e = INodeReferenceSection
           .INodeReference.parseDelimitedFrom(in);
@@ -216,24 +239,44 @@ final class LsrPBImage {
         break;
       }
       refList.add(e);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded inode reference named '" + e.getName() +
+            "' referring to id " + e.getReferredId());
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loaded " + refList.size() + " inode references");
     }
   }
 
   private void loadINodeSection(InputStream in) throws IOException {
     INodeSection s = INodeSection.parseDelimitedFrom(in);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Found " + s.getNumInodes() + " inodes in inode section");
+    }
     for (int i = 0; i < s.getNumInodes(); ++i) {
       INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
       inodes.put(p.getId(), p);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded inode id " + p.getId() + " type " + p.getType() +
+            " name '" + p.getName().toStringUtf8() + "'");
+      }
     }
   }
 
   private void loadStringTable(InputStream in) throws IOException {
     StringTableSection s = StringTableSection.parseDelimitedFrom(in);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Found " + s.getNumEntry() + " strings in string section");
+    }
     stringTable = new String[s.getNumEntry() + 1];
     for (int i = 0; i < s.getNumEntry(); ++i) {
       StringTableSection.Entry e = StringTableSection.Entry
           .parseDelimitedFrom(in);
       stringTable[e.getId()] = e.getStr();
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded string " + e.getStr());
+      }
     }
   }
 }
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java Fri Feb 21 20:08:08 2014
@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
 
 /**
  * OfflineImageViewer to dump the contents of an Hadoop image file to XML or the
@@ -164,7 +165,7 @@ public class OfflineImageViewerPB {
     } catch (IOException e) {
       System.err.println("Encountered exception. Exiting: " + e.getMessage());
     } finally {
-      out.close();
+      IOUtils.cleanup(null, out);
     }
   }

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java Fri Feb 21 20:08:08 2014
@@ -78,7 +78,7 @@ public final class PBImageXmlWriter {
     FileInputStream fin = null;
     try {
       fin = new FileInputStream(file.getFD());
-      out.print("<?xml version=\"1.0\"?>\n");
+      out.print("<?xml version=\"1.0\"?>\n<fsimage>");
 
       ArrayList<FileSummary.Section> sections = Lists.newArrayList(summary
           .getSectionsList());
@@ -138,6 +138,7 @@ public final class PBImageXmlWriter {
           break;
         }
       }
+      out.print("</fsimage>\n");
     } finally {
       IOUtils.cleanup(null, fin);
     }
@@ -229,6 +230,7 @@ public final class PBImageXmlWriter {
       }
       dumpINodeReference(e);
     }
+    out.print("</INodeReferenceSection>");
   }
 
   private void dumpINodeReference(INodeReferenceSection.INodeReference r) {
@@ -301,7 +303,7 @@ public final class PBImageXmlWriter {
         .o("genstampV1Limit", s.getGenstampV1Limit())
         .o("lastAllocatedBlockId", s.getLastAllocatedBlockId())
         .o("txid", s.getTransactionId());
-    out.print("<NameSection>\n");
+    out.print("</NameSection>\n");
   }
 
   private String dumpPermission(long permission) {
@@ -375,7 +377,7 @@ public final class PBImageXmlWriter {
       }
       out.print("</diff>");
     }
-    out.print("<SnapshotDiffSection>\n");
+    out.print("</SnapshotDiffSection>\n");
   }
 
   private void dumpSnapshotSection(InputStream in) throws IOException {

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1569890-1570692
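The logging added to LsrPBImage above uses the standard commons-logging idiom: wrap each message in an isDebugEnabled()/isTraceEnabled() guard so the concatenated string is never built when the level is off, which matters in loops that run once per inode in the image. A minimal sketch (hypothetical class name, real commons-logging API):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class TraceSketch {
      private static final Log LOG = LogFactory.getLog(TraceSketch.class);

      void onInodeLoaded(long id, String name) {
        // Guarded: the concatenation below is skipped entirely unless
        // trace logging is enabled for this class.
        if (LOG.isTraceEnabled()) {
          LOG.trace("Loaded inode id " + id + " name '" + name + "'");
        }
      }
    }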
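The PBImageXmlWriter fixes above are the substance of HDFS-5981: the writer printed <NameSection> and <SnapshotDiffSection> where the closing tags were meant, and never emitted the <fsimage> root or </INodeReferenceSection>, so the output was not well-formed XML. One way to make that class of bug unrepresentable is to pair opens and closes through a stack; a hypothetical sketch, not the actual PBImageXmlWriter design:

    import java.io.PrintWriter;
    import java.util.ArrayDeque;
    import java.util.Deque;

    class SectionWriter {
      private final PrintWriter out;
      private final Deque<String> open = new ArrayDeque<String>();

      SectionWriter(PrintWriter out) { this.out = out; }

      void begin(String name) {        // e.g. begin("NameSection")
        open.push(name);
        out.print("<" + name + ">\n");
      }

      void end() {                     // always emits the matching close tag
        out.print("</" + open.pop() + ">\n");
      }
    }

The companion OfflineImageViewerPB change swaps out.close() in the finally block for IOUtils.cleanup(null, out), Hadoop's quiet-close helper, so a failure during close() cannot mask the exception that led into the finally block.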
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js Fri Feb 21 20:08:08 2014
@@ -66,10 +66,23 @@
 
   function network_error_handler(url) {
     return function (jqxhr, text, err) {
-      var msg = '<p>Failed to retreive data from ' + url + ', cause: ' + err + '</p>';
-      if (url.indexOf('/webhdfs/v1') === 0) {
-        msg += '<p>WebHDFS might be disabled. WebHDFS is required to browse the filesystem.</p>';
-      }
+      switch(jqxhr.status) {
+        case 401:
+          var msg = '<p>Authentication failed when trying to open ' + url + ': Unauthorized.</p>';
+          break;
+        case 403:
+          if(jqxhr.responseJSON !== undefined && jqxhr.responseJSON.RemoteException !== undefined) {
+            var msg = '<p>' + jqxhr.responseJSON.RemoteException.message + "</p>";
+            break;
+          }
+          var msg = '<p>Permission denied when trying to open ' + url + ': ' + err + '</p>';
+          break;
+        case 404:
+          var msg = '<p>Path does not exist on HDFS or WebHDFS is disabled. Please check your path or enable WebHDFS.</p>';
+          break;
+        default:
+          var msg = '<p>Failed to retrieve data from ' + url + ': ' + err + '</p>';
+      }
       show_err_msg(msg);
     };
   }
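The rewritten handler above (HDFS-5935) maps WebHDFS HTTP statuses to actionable messages instead of one generic failure string, and prefers the server's own RemoteException message for 403s. The same mapping rendered in Java, as a hypothetical helper rather than anything in Hadoop:

    class WebHdfsErrors {
      static String describe(int status, String url, String err) {
        switch (status) {
          case 401:
            return "Authentication failed when trying to open " + url
                + ": Unauthorized.";
          case 403:
            return "Permission denied when trying to open " + url + ": " + err;
          case 404:
            return "Path does not exist on HDFS or WebHDFS is disabled. "
                + "Please check your path or enable WebHDFS.";
          default:
            return "Failed to retrieve data from " + url + ": " + err;
        }
      }
    }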
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Fri Feb 21 20:08:08 2014
@@ -330,13 +330,14 @@ public class TestDFSUpgradeFromImage {
    * paths to test renaming on upgrade
    */
   @Test
-  public void testUpgradeFromRel2ReservedImage() throws IOException {
+  public void testUpgradeFromRel2ReservedImage() throws Exception {
     unpackStorage(HADOOP2_RESERVED_IMAGE);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
     try {
       cluster =
-          new MiniDFSCluster.Builder(new Configuration())
+          new MiniDFSCluster.Builder(conf)
               .format(false)
               .startupOption(StartupOption.UPGRADE)
               .numDataNodes(0).build();
@@ -355,28 +356,15 @@ public class TestDFSUpgradeFromImage {
           ".snapshot=.user-snapshot," +
           ".reserved=.my-reserved");
       cluster =
-          new MiniDFSCluster.Builder(new Configuration())
+          new MiniDFSCluster.Builder(conf)
               .format(false)
               .startupOption(StartupOption.UPGRADE)
               .numDataNodes(0).build();
-      // Make sure the paths were renamed as expected
       DistributedFileSystem dfs = cluster.getFileSystem();
-      ArrayList<Path> toList = new ArrayList<Path>();
-      ArrayList<String> found = new ArrayList<String>();
-      toList.add(new Path("/"));
-      while (!toList.isEmpty()) {
-        Path p = toList.remove(0);
-        FileStatus[] statuses = dfs.listStatus(p);
-        for (FileStatus status: statuses) {
-          final String path = status.getPath().toUri().getPath();
-          System.out.println("Found path " + path);
-          found.add(path);
-          if (status.isDirectory()) {
-            toList.add(status.getPath());
-          }
-        }
-      }
-      String[] expected = new String[] {
+      // Make sure the paths were renamed as expected
+      // Also check that the paths are still present after a restart, which
+      // verifies that the upgraded fsimage preserved the same state.
+      final String[] expected = new String[] {
         "/edits",
         "/edits/.reserved",
         "/edits/.user-snapshot",
         "/image",
         "/.reserved",
         "/.my-reserved",
         "/.my-reserved/edits-touch",
         "/.my-reserved/image-touch"
       };
-
-      for (String s: expected) {
-        assertTrue("Did not find expected path " + s, found.contains(s));
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
       }
-      assertEquals("Found an unexpected path while listing filesystem",
-          found.size(), expected.length);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
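The reworked test above is the regression test for HDFS-5988: it checks the renamed paths twice, once immediately after the upgrade and again after finalizing and restarting the NameNode, so a corrupt fsimage written by the upgraded NameNode is caught. The listing itself is a plain breadth-first walk; a self-contained sketch against the public FileSystem API:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class FsWalkSketch {
      // Returns every path under "/" in breadth-first order, assuming an
      // already-initialized FileSystem (e.g. from a MiniDFSCluster).
      static List<String> listAll(FileSystem fs) throws IOException {
        List<Path> queue = new ArrayList<Path>();
        List<String> found = new ArrayList<String>();
        queue.add(new Path("/"));
        while (!queue.isEmpty()) {
          for (FileStatus status : fs.listStatus(queue.remove(0))) {
            found.add(status.getPath().toUri().getPath());
            if (status.isDirectory()) {
              queue.add(status.getPath()); // descend into subdirectories
            }
          }
        }
        return found;
      }
    }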
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Fri Feb 21 20:08:08 2014
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -128,7 +129,42 @@ public class TestSnapshotDeletion {
     exception.expectMessage(error);
     hdfs.delete(sub, true);
   }
-
+
+  /**
+   * Test applying an editlog operation that deletes a snapshottable directory
+   * without snapshots. The snapshottable dir list in the snapshot manager
+   * should be updated.
+   */
+  @Test (timeout=300000)
+  public void testApplyEditLogForDeletion() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar1 = new Path(foo, "bar1");
+    final Path bar2 = new Path(foo, "bar2");
+    hdfs.mkdirs(bar1);
+    hdfs.mkdirs(bar2);
+
+    // allow snapshots on bar1 and bar2
+    hdfs.allowSnapshot(bar1);
+    hdfs.allowSnapshot(bar2);
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+
+    // delete /foo
+    hdfs.delete(foo, true);
+    cluster.restartNameNode(0);
+    // the snapshottable dir list in snapshot manager should be empty
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(0);
+  }
+
   /**
    * Deleting directory with snapshottable descendant with snapshots must fail.
   */
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Fri Feb 21 20:08:08 2014
@@ -18,23 +18,24 @@
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.RandomAccessFile;
+import java.io.StringReader;
 import java.io.StringWriter;
 import java.util.HashMap;
-import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.parsers.SAXParser;
+import javax.xml.parsers.SAXParserFactory;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,21 +44,23 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.PathUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
+import org.xml.sax.helpers.DefaultHandler;
+
+import com.google.common.collect.Maps;
 
 /**
  * Test function of OfflineImageViewer by:
 * confirming it can correctly process
@@ -85,7 +88,7 @@ public class TestOfflineImageViewer {
   }
 
   // namespace as written to dfs, to be compared with viewer's output
-  final static HashMap<String, FileStatus> writtenFiles = new HashMap<String, FileStatus>();
+  final static HashMap<String, FileStatus> writtenFiles = Maps.newHashMap();
 
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
@@ -98,7 +101,7 @@ public class TestOfflineImageViewer {
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new HdfsConfiguration();
+      Configuration conf = new Configuration();
       conf.setLong(
           DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
       conf.setLong(
@@ -107,11 +110,9 @@ public class TestOfflineImageViewer {
           DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
       conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
           "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       cluster.waitActive();
-      FileSystem hdfs = cluster.getFileSystem();
-
-      int filesize = 256;
+      DistributedFileSystem hdfs = cluster.getFileSystem();
 
       // Create a reasonable namespace
       for (int i = 0; i < NUM_DIRS; i++) {
@@ -121,7 +122,7 @@ public class TestOfflineImageViewer {
         for (int j = 0; j < FILES_PER_DIR; j++) {
           Path file = new Path(dir, "file" + j);
           FSDataOutputStream o = hdfs.create(file);
-          o.write(new byte[filesize++]);
+          o.write(23);
           o.close();
 
           writtenFiles.put(file.toString(),
@@ -136,10 +137,15 @@ public class TestOfflineImageViewer {
         LOG.debug("got token " + t);
       }
 
+      final Path snapshot = new Path("/snapshot");
+      hdfs.mkdirs(snapshot);
+      hdfs.allowSnapshot(snapshot);
+      hdfs.mkdirs(new Path("/snapshot/1"));
+      hdfs.delete(snapshot, true);
+
       // Write results to the fsimage file
-      cluster.getNameNodeRpc()
-          .setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-      cluster.getNameNodeRpc().saveNamespace();
+      hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
+      hdfs.saveNamespace();
 
       // Determine location of fsimage file
       originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
@@ -249,4 +255,17 @@ public class TestOfflineImageViewer {
     int totalFiles = Integer.parseInt(matcher.group(1));
     assertEquals(totalFiles, NUM_DIRS * FILES_PER_DIR);
   }
+
+  @Test
+  public void testPBImageXmlWriter() throws IOException, SAXException,
+      ParserConfigurationException {
+    StringWriter output = new StringWriter();
+    PrintWriter o = new PrintWriter(output);
+    PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
+    v.visit(new RandomAccessFile(originalFsimage, "r"));
+    SAXParserFactory spf = SAXParserFactory.newInstance();
+    SAXParser parser = spf.newSAXParser();
+    final String xml = output.getBuffer().toString();
+    parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
+  }
 }
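testPBImageXmlWriter above pins HDFS-5981 down from the consumer side: it runs the writer over a real fsimage and feeds the result to a SAX parser with a no-op DefaultHandler, since parsing alone verifies well-formedness and an unclosed NameSection fails immediately. The check generalizes to any XML string; a sketch:

    import java.io.StringReader;
    import javax.xml.parsers.SAXParser;
    import javax.xml.parsers.SAXParserFactory;
    import org.xml.sax.InputSource;
    import org.xml.sax.helpers.DefaultHandler;

    class XmlCheckSketch {
      // Throws SAXException if xml is not well-formed; the DefaultHandler
      // ignores all content, so nothing but structure is validated.
      static void assertWellFormed(String xml) throws Exception {
        SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
        parser.parse(new InputSource(new StringReader(xml)),
            new DefaultHandler());
      }
    }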
Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml?rev=1570694&r1=1570693&r2=1570694&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testAclCLI.xml Fri Feb 21 20:08:08 2014
@@ -911,5 +911,66 @@
       </comparator>
       </comparators>
     </test>
+    <test>
+      <description>setfacl: recursive modify entries with mix of files and directories</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /dir1</command>
+        <command>-fs NAMENODE -touchz /dir1/file1</command>
+        <command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
+        <command>-fs NAMENODE -touchz /dir1/dir2/file2</command>
+        <command>-fs NAMENODE -setfacl -R -m user:charlie:rwx,default:user:charlie:r-x /dir1</command>
+        <command>-fs NAMENODE -getfacl -R /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -R /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF#</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    <test>
+      <description>setfacl: recursive remove entries with mix of files and directories</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /dir1</command>
+        <command>-fs NAMENODE -touchz /dir1/file1</command>
+        <command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
+        <command>-fs NAMENODE -touchz /dir1/dir2/file2</command>
+        <command>-fs NAMENODE -setfacl -R -m user:bob:rwx,user:charlie:rwx,default:user:bob:rwx,default:user:charlie:r-x /dir1</command>
+        <command>-fs NAMENODE -setfacl -R -x user:bob,default:user:bob /dir1</command>
+        <command>-fs NAMENODE -getfacl -R /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -R /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rw-#LF#user:charlie:rwx#LF#group::r--#LF#mask::rwx#LF#other::r--#LF##LF#</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    <test>
+      <description>setfacl: recursive set with mix of files and directories</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /dir1</command>
+        <command>-fs NAMENODE -touchz /dir1/file1</command>
+        <command>-fs NAMENODE -mkdir -p /dir1/dir2</command>
+        <command>-fs NAMENODE -touchz /dir1/dir2/file2</command>
+        <command>-fs NAMENODE -setfacl -R --set user::rwx,user:charlie:rwx,group::r-x,other::r-x,default:user:charlie:r-x /dir1</command>
+        <command>-fs NAMENODE -getfacl -R /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -R /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output># file: /dir1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF#default:user::rwx#LF#default:user:charlie:r-x#LF#default:group::r-x#LF#default:mask::r-x#LF#default:other::r-x#LF##LF## file: /dir1/dir2/file2#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF##LF## file: /dir1/file1#LF## owner: USERNAME#LF## group: supergroup#LF#user::rwx#LF#user:charlie:rwx#LF#group::r-x#LF#mask::rwx#LF#other::r-x#LF##LF#</expected-output>
+        </comparator>
+      </comparators>
+    </test>
 </tests>
 </configuration>
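The new testAclCLI.xml cases above exercise recursive setfacl over a mixed tree of files and directories. For reference, one non-recursive modify step has a direct programmatic equivalent in the FileSystem ACL API; a sketch (note the shell implements -R on the client side, so API callers must walk the tree themselves):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclEntryScope;
    import org.apache.hadoop.fs.permission.AclEntryType;
    import org.apache.hadoop.fs.permission.FsAction;

    class AclSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Equivalent of: setfacl -m user:charlie:rwx,default:user:charlie:r-x /dir1
        List<AclEntry> aclSpec = Arrays.asList(
            new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
                .setType(AclEntryType.USER).setName("charlie")
                .setPermission(FsAction.ALL).build(),
            new AclEntry.Builder().setScope(AclEntryScope.DEFAULT)
                .setType(AclEntryType.USER).setName("charlie")
                .setPermission(FsAction.READ_EXECUTE).build());
        fs.modifyAclEntries(new Path("/dir1"), aclSpec);
      }
    }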